Dataset columns:
  repo_name: stringlengths 5-100
  ref: stringlengths 12-67
  path: stringlengths 4-244
  copies: stringlengths 1-8
  content: stringlengths 0-1.05M
repo_name: Titulacion-Sistemas/PythonTitulacion-EV
ref: refs/heads/master
path: Lib/site-packages/simplejson/tests/test_decimal.py
copies: 147
content:

import decimal
from decimal import Decimal
from unittest import TestCase
from simplejson.compat import StringIO, reload_module

import simplejson as json

class TestDecimal(TestCase):
    NUMS = "1.0", "10.00", "1.1", "1234567890.1234567890", "500"

    def dumps(self, obj, **kw):
        sio = StringIO()
        json.dump(obj, sio, **kw)
        res = json.dumps(obj, **kw)
        self.assertEqual(res, sio.getvalue())
        return res

    def loads(self, s, **kw):
        sio = StringIO(s)
        res = json.loads(s, **kw)
        self.assertEqual(res, json.load(sio, **kw))
        return res

    def test_decimal_encode(self):
        for d in map(Decimal, self.NUMS):
            self.assertEqual(self.dumps(d, use_decimal=True), str(d))

    def test_decimal_decode(self):
        for s in self.NUMS:
            self.assertEqual(self.loads(s, parse_float=Decimal), Decimal(s))

    def test_stringify_key(self):
        for d in map(Decimal, self.NUMS):
            v = {d: d}
            self.assertEqual(
                self.loads(
                    self.dumps(v, use_decimal=True), parse_float=Decimal),
                {str(d): d})

    def test_decimal_roundtrip(self):
        for d in map(Decimal, self.NUMS):
            # The type might not be the same (int and Decimal) but they
            # should still compare equal.
            for v in [d, [d], {'': d}]:
                self.assertEqual(
                    self.loads(
                        self.dumps(v, use_decimal=True), parse_float=Decimal),
                    v)

    def test_decimal_defaults(self):
        d = Decimal('1.1')
        # use_decimal=True is the default
        self.assertRaises(TypeError, json.dumps, d, use_decimal=False)
        self.assertEqual('1.1', json.dumps(d))
        self.assertEqual('1.1', json.dumps(d, use_decimal=True))
        self.assertRaises(TypeError, json.dump, d, StringIO(),
                          use_decimal=False)
        sio = StringIO()
        json.dump(d, sio)
        self.assertEqual('1.1', sio.getvalue())
        sio = StringIO()
        json.dump(d, sio, use_decimal=True)
        self.assertEqual('1.1', sio.getvalue())

    def test_decimal_reload(self):
        # Simulate a subinterpreter that reloads the Python modules but not
        # the C code https://github.com/simplejson/simplejson/issues/34
        global Decimal
        Decimal = reload_module(decimal).Decimal
        import simplejson.encoder
        simplejson.encoder.Decimal = Decimal
        self.test_decimal_roundtrip()
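A minimal sketch of the use_decimal round-trip exercised by the tests above (assumes simplejson is installed; the variable names are illustrative):

    from decimal import Decimal
    import simplejson as json

    # Decimals serialize losslessly with use_decimal=True (the default,
    # per test_decimal_defaults above)
    s = json.dumps(Decimal('1.1'), use_decimal=True)   # -> '1.1'
    # parse_float=Decimal turns JSON numbers back into Decimal objects
    assert json.loads(s, parse_float=Decimal) == Decimal('1.1')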
repo_name: fresskarma/tinyos-1.x
ref: refs/heads/master
path: contrib/nestfe/python/MetricsMonitor.py
copies: 2
content:

# Monitoring tools for interacting with MetricsMote and TestDetectionEvent
# with the KrakenMetrics module
#
# USAGE:
# 1) setup your PYTHONPATH to include the directory containing this file
# -- Option I --
# 2a) Start PyTOS using PytosShell.py in the TestDetectionEvent or
#     MetricsMote directory
# 2b) assign appType appropriately at the console
#     Valid options are: "TestDetectionEvent", "MetricsMote"
#     (see bottom of file for details)
# 2c) at the PyTOS command prompt type
#     from MetricsMonitor import *
# -- Option II --
# 2) Start up PyTOS using MetricsShell.py
# 3) Interact with Program via keypresses
#
# See nestfe/nesc/apps/MetricsTools for more details
# See also nestfe/scripts/metrics/sh for startup examples
#
# KNOWN LIMITATIONS:
# * The queue sizes of the various threads put a limit on the rates with
#   which the threads poll. Particularly, you should check that the size
#   of monQ is adequate for slow polling rates.
#
# IMPLEMENTATION:
# For MetricsMonitor, the general message handling flow is:
# 1) messages arrive and are processed by the message handling thread
#    running processMessages(). Messages are distributed to the
#    monitoring thread through monQ.
# 2) the monitoring thread processes the messages and outputs to the
#    screen. The user can suppress/configure the output by typing at
#    the console.

import os, sys
import threading
from Queue import Queue
from time import sleep, time

# Pytos stuff
from __main__ import app
from __main__ import appType
import pytos.Comm as Comm
from pytos.Comm import MessageQueue
import pytos.util.KeyPress as KeyPress

##### See bottom of file for executed code when module is imported #####

#################### External Functions ####################

class MetricsMonitor(object):

    def __init__(self, sendComm, recvComm, appType="MetricsMote"):
        """Creation of MetricsMonitor object to allow background thread
        processing.

        appType  - Flag to determine how to parse messages
                   (MetricsMote, TestDetectionEvent)
        recvComm - Comm object for receiving messages
        sendComm - Comm object for sending messages
        """
        self.appType = appType  # for debugging

        ## Display Configurations
        print "\nType \"h\" for help\n"
        self.keyPress = KeyPress.KeyPress()
        self.verbosity = '4'
        self.numLostUpdates = 0
        self.outputFlag = True

        ## Monitor Thread
        self.monQ = Queue(1000)  # large to be safe
        # Data Structure
        # all dictionaries use nodeID for keys
        # monTable entries: [seqNo,timeStamp]
        # monStats entries: list of [numReply,numSent,succRate]
        # monPrev entries: previously seen sequence number
        self.monTable = {}
        self.monStats = {}
        self.monPrevSeqNo = {}
        # updateType - 'time' or 'pkts'
        #   'pkts' means we wait to receive updatePeriod number of
        #   recognized pkts before reporting
        # updatePeriod - number of seconds or number of packets
        #   between updating statistics
        self.updateType = 'time'
        self.updatePeriod = 2
        self.minPeriod = 0.1  # limits for updatePeriod
        self.maxPeriod = 10   # limits for updatePeriod
        self.running = True
        self.forceContinue = False  # Used to force pkt updates to not wait on monQ
        self.monSemaphore = threading.Semaphore()
        monThread = threading.Thread(target=self.processRates, args=())
        monThread.setDaemon(True)
        monThread.start()

        ## Message Handling
        self.sendComm = sendComm
        self.recvComm = recvComm
        msgQ = MessageQueue(10)
        if (appType == "TestDetectionEvent"):
            # recvComm is a drain connection (shouldn't matter)
            self.msgList = [app.msgs.MetricsReplyMsg,
                            app.msgs.DetectionEventMsg]
            self.AM_METRICSREPLYMSG = app.enums.MetricsTypes.AM_METRICSREPLYMSG
            self.AM_DETECTIONEVENTMSG = app.enums.AM_DETECTIONEVENTMSG
        else:  # assume appType == MetricsMote
            self.msgList = [app.msgs.MetricsReplyMsg]
            self.AM_METRICSREPLYMSG = app.enums.MetricsTypes.AM_METRICSREPLYMSG
            self.AM_DETECTIONEVENTMSG = None
        for msgName in self.msgList:
            recvComm.register(msgName, msgQ)
        msgThread = threading.Thread(target=self.processMessages, args=(msgQ,))
        msgThread.setDaemon(True)
        msgThread.start()

    ##### Threading and Processing Code #####

    def processMessages(self, msgQ):
        """Thread message handling code.

        Message handling involves filtering for the proper message and
        distributing messages to the monitoring queue. Could eliminate monQ
        and make one thread, but if it ain't broke don't fix it.
        """
        while self.running:
            (addr, msg) = msgQ.get()
            if (((msg.amType == self.AM_METRICSREPLYMSG) and
                 (msg.msgType == app.enums.MetricsTypes.CONST_REPORT_REPLY)) or
                    (msg.amType == self.AM_DETECTIONEVENTMSG)):
                self.monQ.put(msg)

    def transSuccCalc(self):
        """Called by processRates() for calculations involving transmission
        success rate.

        PRECONDITION:
        * motes send packets starting with counter/sequence number 1
        * monTable only contains packets from the last updatePeriod
        POSTCONDITION: self.monStats updated with
        1) transmission success rate
           * for time monitoring: this is calculated assuming no packets
             missed at the end of a period (these missing packets are
             accounted for at the beginning of the next period)
           * for packet monitoring: "sent" pkts / received pkts (see below)
           * -1 means no packets received
        2) number of packets received (for time monitoring)
        3) number of packets "sent"
           * counts the number of packets between last recorded sequence
             number and the last packet received
        """
        for nodeID in sorted(self.monTable.keys()):
            monList = sorted(self.monTable[nodeID])
            lastSeqNo = monList[len(monList)-1]
            numReply = len(monList)
            if self.monPrevSeqNo.has_key(nodeID):
                numSent = lastSeqNo - self.monPrevSeqNo[nodeID]
            else:
                numSent = lastSeqNo  # assume start from 1
            if (numSent == 0):
                succRate = -1
            else:
                succRate = numReply/numSent
            self.monPrevSeqNo[nodeID] = lastSeqNo
            if self.monStats.has_key(nodeID):
                self.monStats[nodeID].append([numReply, numSent, succRate])
            else:
                self.monStats[nodeID] = [[numReply, numSent, succRate]]

    def insertMonTable(self, msg):
        """Performs unpacking of different message types and inserts into
        monitoring table. Used by processRates().
        """
        if (msg.amType == self.AM_METRICSREPLYMSG):
            nodeID = msg.nodeID
            counter = msg.data  # sequence number
        elif (msg.amType == self.AM_DETECTIONEVENTMSG):
            nodeID = msg.parentMsg.source
            counter = msg.count
        if self.monTable.has_key(nodeID):
            self.monTable[nodeID].append(counter)
        else:
            self.monTable[nodeID] = [counter]

    def processRates(self):
        """Monitor Thread code. Monitors packets dumped in monQ.

        The modes of operation (updateType, updatePeriod) can change during
        execution.
        updateType   - flag of "time" or "pkts" for operating mode
        updatePeriod - time/num packets between monitoring updates

        METHOD: Dumps data into a table (for consistent state), then
        processes it. Erases previous monitoring statistics before
        proceeding.

        POSTCONDITION: Under 'time' monitoring mode, does not display
        statistics for the last period if we stop the thread.
        """
        self.monStats = {}
        startTime = time()
        while self.running:
            self.forceContinue = False
            self.monTable = {}
            self.monSemaphore.acquire()  # synchronize operations
            updateType = self.updateType
            updatePeriod = self.updatePeriod
            self.monSemaphore.release()  # synch
            if (updateType == "time"):
                startTime = time()
                # Dump data into a table
                pktCnt = 0
                while not self.monQ.empty():
                    msg = self.monQ.get()
                    self.insertMonTable(msg)
                    pktCnt += 1
                recvPkts = pktCnt
                self.transSuccCalc()
                self.dispMonitor(recvPkts, updatePeriod)
                # Wait for next updatePeriod
                processTime = time() - startTime
                sleep(max(0, updatePeriod - processTime))
            elif (updateType == "pkts"):
                # Dump data into a table
                pktCnt = 0
                # allows for quitting monitoring thread if no pkts arrive
                while (self.running and (not self.forceContinue) and
                       (pktCnt <= updatePeriod)):
                    try:
                        msg = self.monQ.get(True, 2)
                        self.insertMonTable(msg)
                        pktCnt += 1
                    except:
                        pass
                stopTime = time()
                pktWaitTime = stopTime - startTime
                startTime = stopTime
                recvPkts = pktCnt - 1
                self.transSuccCalc()
                self.dispMonitor(recvPkts, pktWaitTime)
            else:
                print "Unknown update type for monitoring: %s" % (updateType)
                break
        print "Monitoring Thread Finished."

    ##### Display Code #####

    def dispMonitor(self, recvPkts, pktWaitTime):
        """Displays monitoring output, based on verbosity levels."""
        # All verbosity levels
        transRateStr = "Transmission Rate (%d/%d) %.3f pkts/sec\n" \
                       % (recvPkts, pktWaitTime, recvPkts/pktWaitTime)
        separatorStr = "******************************\n"
        # initialize here so the high-verbosity branch can append to it
        transSuccStr = ""
        if (int(self.verbosity) > 5):
            for nodeID, val in self.monStats.iteritems():
                recentStat = val[len(val)-1]
                (numReply, numSent, succRate) = recentStat
                transSuccStr = transSuccStr + \
                    "Node: %d success rate %d/%d (%d %%)\n" \
                    % (nodeID, numReply, numSent, succRate*100)
        # Display Output
        dispStr = transSuccStr + separatorStr + transRateStr
        if (self.outputFlag == True):
            print dispStr
        else:
            self.numLostUpdates += 1

    def readKeys(self):
        while self.running:
            try:
                key = self.keyPress.getChar(blocking=True)
                {'q': self.quit,  # sys.exit,
                 '': self.quit,   # sys.exit,
                 'c': self.configure,
                 'h': self.help,
                 'l': self.drawLine,
                 'm': self.dispMode,
                 'p': self.pause,
                 '1': lambda: self.setVerbosity(key),
                 '2': lambda: self.setVerbosity(key),
                 '3': lambda: self.setVerbosity(key),
                 '4': lambda: self.setVerbosity(key),
                 '5': lambda: self.setVerbosity(key),
                 '6': lambda: self.setVerbosity(key),
                 '7': lambda: self.setVerbosity(key),
                 '8': lambda: self.setVerbosity(key),
                 '9': lambda: self.setVerbosity(key),
                 }[key]()
            except Exception, e:
                if len(e.args) > 0:
                    print e.args[0]
                else:
                    raise
                print "key %s not understood. Press \"h\" for help" % key

    def setVerbosity(self, verbosity):
        self.verbosity = verbosity
        print "Verbosity is now %s" % self.verbosity

    def printLostUpdates(self):
        banner = "\nHit any key to resume. Updates lost: 0"
        sys.stdout.write(banner)
        sys.stdout.flush()
        numPrintedChars = 1
        while True:
            sleep(1)
            c = self.keyPress.getChar(blocking=False)
            if c == "":
                strg = ""
                for i in range(numPrintedChars):
                    strg += "\b"
                strg = "%s%d" % (strg, self.numLostUpdates)
                numPrintedChars = len(strg) - numPrintedChars
                sys.stdout.write(strg)
                sys.stdout.flush()
            else:
                print
                break

    def help(self):
        usage = """
        c : configure
        h : help
        l : draw line now
        m : print mode
        p : pause
        q : quit
        1-9 : verbosity (1 is low, 9 is high)
        """
        self.stop()
        print " Current verbosity: %s" % self.verbosity
        print usage
        self.printLostUpdates()
        self.start()

    def configure(self):
        """Prompts for and reads in user values for update type and
        update period.
        """
        configTypeMenu = """
        Enter Update Type:
        0 : current setting (%s)
        1 : 'time'
        2 : 'pkts'
        """ % (self.updateType)
        configPeriodMenu = """
        Current Period is %d
        Enter Update Period (secs, 0 for current period):
        """ % (self.updatePeriod)
        self.stop()
        print configTypeMenu
        while True:  # Getting Type input
            try:
                key = self.keyPress.getChar(blocking=True)
                if key == '0':
                    print "keeping current update type"
                    updateType = self.updateType
                    break
                if key == '1':
                    print "'time' update type selected"
                    updateType = 'time'
                    break
                if key == '2':
                    print "'pkts' update type selected"
                    updateType = 'pkts'
                    break
                else:
                    print "key %s not understood." % key
                    print configTypeMenu
            except Exception, e:
                if len(e.args) > 0:
                    print e.args[0]
                else:
                    raise
                print "key %s not understood." % key
                print configTypeMenu
        while True:  # Getting Period input
            try:
                updatePeriodStr = raw_input(configPeriodMenu)
                updatePeriod = float(updatePeriodStr)
                if (updatePeriod == 0):
                    updatePeriod = self.updatePeriod
                if not ((updatePeriod >= self.minPeriod) and
                        (updatePeriod <= self.maxPeriod)):
                    print("Your entered value is out of range [%d,%d]"
                          % (self.minPeriod, self.maxPeriod))
                else:
                    break
            except Exception, e:
                if len(e.args) > 0:
                    print e.args[0]
                else:
                    raise
                print "Input must be a float value. You input: %s" % (updatePeriodStr)
        # Synchronize with thread
        self.monSemaphore.acquire()
        self.updateType = updateType
        self.updatePeriod = updatePeriod
        self.monSemaphore.release()
        self.forceContinue = True
        self.start()

    def drawLine(self):
        print " --------------- "

    def dispMode(self):
        print ("Monitoring Mode: Update Type %s, Update Period %d"
               % (self.updateType, self.updatePeriod))

    def pause(self):
        self.stop()
        self.printLostUpdates()
        self.start()

    def start(self):
        self.outputFlag = True

    def stop(self):
        self.outputFlag = False
        self.numLostUpdates = 0

    def quit(self):
        '''Quit Application.

        Stops Monitoring Thread, stops message processing thread and
        returns the python prompt.
        '''
        self.outputFlag = False
        self.running = False

##### Main Code #####

## Instantiate your own Comm object and connection
# import pytos.Comm as Comm
# recvComm = Comm.Comm
# recvComm.connect("sf@localhost:9001")

## Uses the comm object instantiated by app
if (appType == 'TestDetectionEvent'):
    recvComm = app.rpc.receiveComm  # drain comm
    sendComm = Comm.getCommObject(app)
else:  # assume 'MetricsMote'
    recvComm = Comm.getCommObject(app)  # ex. app.connections[0]
    sendComm = recvComm

metricsMsg = app.msgs.MetricsCmdMsg
mMon = MetricsMonitor(sendComm, recvComm, appType)
mMon.readKeys()
repo_name: bufferx/tornado
ref: refs/heads/master
path: tornado/concurrent.py
copies: 19
content:

#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.

``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package (this package has also
been backported to older versions of Python and can be installed with
``pip install futures``). Tornado will use `concurrent.futures.Future` if
it is available; otherwise it will use a compatible class defined in this
module.
"""
from __future__ import absolute_import, division, print_function, with_statement

import functools
import sys

from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer

try:
    from concurrent import futures
except ImportError:
    futures = None


class ReturnValueIgnoredError(Exception):
    pass


class _DummyFuture(object):
    def __init__(self):
        self._done = False
        self._result = None
        self._exception = None
        self._callbacks = []

    def cancel(self):
        return False

    def cancelled(self):
        return False

    def running(self):
        return not self._done

    def done(self):
        return self._done

    def result(self, timeout=None):
        self._check_done()
        if self._exception:
            raise self._exception
        return self._result

    def exception(self, timeout=None):
        self._check_done()
        if self._exception:
            return self._exception
        else:
            return None

    def add_done_callback(self, fn):
        if self._done:
            fn(self)
        else:
            self._callbacks.append(fn)

    def set_result(self, result):
        self._result = result
        self._set_done()

    def set_exception(self, exception):
        self._exception = exception
        self._set_done()

    def _check_done(self):
        if not self._done:
            raise Exception("DummyFuture does not support blocking for results")

    def _set_done(self):
        self._done = True
        for cb in self._callbacks:
            # TODO: error handling
            cb(self)
        self._callbacks = None


if futures is None:
    Future = _DummyFuture
else:
    Future = futures.Future


class TracebackFuture(Future):
    """Subclass of `Future` which can store a traceback with exceptions.

    The traceback is automatically available in Python 3, but in the
    Python 2 futures backport this information is discarded.
    """
    def __init__(self):
        super(TracebackFuture, self).__init__()
        self.__exc_info = None

    def exc_info(self):
        return self.__exc_info

    def set_exc_info(self, exc_info):
        """Traceback-aware replacement for
        `~concurrent.futures.Future.set_exception`.
        """
        self.__exc_info = exc_info
        self.set_exception(exc_info[1])

    def result(self, timeout=None):
        if self.__exc_info is not None:
            raise_exc_info(self.__exc_info)
        else:
            return super(TracebackFuture, self).result(timeout=timeout)


class DummyExecutor(object):
    def submit(self, fn, *args, **kwargs):
        future = TracebackFuture()
        try:
            future.set_result(fn(*args, **kwargs))
        except Exception:
            future.set_exc_info(sys.exc_info())
        return future

    def shutdown(self, wait=True):
        pass

dummy_executor = DummyExecutor()


def run_on_executor(fn):
    """Decorator to run a synchronous method asynchronously on an executor.

    The decorated method may be called with a ``callback`` keyword
    argument and returns a future.

    This decorator should be used only on methods of objects with attributes
    ``executor`` and ``io_loop``.
    """
    @functools.wraps(fn)
    def wrapper(self, *args, **kwargs):
        callback = kwargs.pop("callback", None)
        future = self.executor.submit(fn, self, *args, **kwargs)
        if callback:
            self.io_loop.add_future(future,
                                    lambda future: callback(future.result()))
        return future
    return wrapper


_NO_RESULT = object()


def return_future(f):
    """Decorator to make a function that returns via callback return a
    `Future`.

    The wrapped function should take a ``callback`` keyword argument
    and invoke it with one argument when it has finished. To signal failure,
    the function can simply raise an exception (which will be
    captured by the `.StackContext` and passed along to the ``Future``).

    From the caller's perspective, the callback argument is optional.
    If one is given, it will be invoked when the function is complete
    with `Future.result()` as an argument. If the function fails, the
    callback will not be run and an exception will be raised into the
    surrounding `.StackContext`.

    If no callback is given, the caller should use the ``Future`` to
    wait for the function to complete (perhaps by yielding it in a
    `.gen.engine` function, or passing it to `.IOLoop.add_future`).

    Usage::

        @return_future
        def future_func(arg1, arg2, callback):
            # Do stuff (possibly asynchronous)
            callback(result)

        @gen.engine
        def caller(callback):
            yield future_func(arg1, arg2)
            callback()

    Note that ``@return_future`` and ``@gen.engine`` can be applied to the
    same function, provided ``@return_future`` appears first. However,
    consider using ``@gen.coroutine`` instead of this combination.
    """
    replacer = ArgReplacer(f, 'callback')

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        future = TracebackFuture()
        callback, args, kwargs = replacer.replace(
            lambda value=_NO_RESULT: future.set_result(value),
            args, kwargs)

        def handle_error(typ, value, tb):
            future.set_exc_info((typ, value, tb))
            return True
        exc_info = None
        with ExceptionStackContext(handle_error):
            try:
                result = f(*args, **kwargs)
                if result is not None:
                    raise ReturnValueIgnoredError(
                        "@return_future should not be used with functions "
                        "that return values")
            except:
                exc_info = sys.exc_info()
                raise
        if exc_info is not None:
            # If the initial synchronous part of f() raised an exception,
            # go ahead and raise it to the caller directly without waiting
            # for them to inspect the Future.
            raise_exc_info(exc_info)

        # If the caller passed in a callback, schedule it to be called
        # when the future resolves. It is important that this happens
        # just before we return the future, or else we risk confusing
        # stack contexts with multiple exceptions (one here with the
        # immediate exception, and again when the future resolves and
        # the callback triggers its exception by calling future.result()).
        if callback is not None:
            def run_callback(future):
                result = future.result()
                if result is _NO_RESULT:
                    callback()
                else:
                    callback(future.result())
            future.add_done_callback(wrap(run_callback))
        return future
    return wrapper


def chain_future(a, b):
    """Chain two futures together so that when one completes, so does the other.

    The result (success or failure) of ``a`` will be copied to ``b``.
    """
    def copy(future):
        assert future is a
        if (isinstance(a, TracebackFuture) and isinstance(b, TracebackFuture)
                and a.exc_info() is not None):
            b.set_exc_info(a.exc_info())
        elif a.exception() is not None:
            b.set_exception(a.exception())
        else:
            b.set_result(a.result())
    a.add_done_callback(copy)
repo_name: midnightercz/pulp_docker
ref: refs/heads/master
path: plugins/pulp_docker/plugins/distributors/publish_steps.py
copies: 1
content:

from gettext import gettext as _
import logging
import os

from mongoengine import Q

from pulp.plugins.util.publish_step import PluginStep, \
    AtomicDirectoryPublishStep, SaveTarFilePublishStep
from pulp.plugins.util import misc as plugin_utils
from pulp.server.controllers import repository as repo_controller

from pulp_docker.common import constants
from pulp_docker.plugins.distributors import configuration
from pulp_docker.plugins.distributors.metadata import RedirectFileContext

_LOG = logging.getLogger(__name__)


class WebPublisher(PluginStep):
    """
    Docker Web publisher class that is responsible for the actual publishing
    of a docker repository via a web server
    """

    def __init__(self, repo, publish_conduit, config):
        """
        :param repo: Pulp managed Docker repository
        :type repo: pulp.server.db.model.Repository
        :param publish_conduit: Conduit providing access to relative Pulp functionality
        :type publish_conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
        :param config: Pulp configuration for the distributor
        :type config: pulp.plugins.config.PluginCallConfiguration
        """
        super(WebPublisher, self).__init__(constants.PUBLISH_STEP_WEB_PUBLISHER,
                                           repo, publish_conduit, config)

        publish_dir = configuration.get_web_publish_dir(repo, config)
        app_file = configuration.get_redirect_file_name(repo)
        app_publish_location = os.path.join(
            configuration.get_app_publish_dir(config), app_file)
        self.web_working_dir = os.path.join(self.get_working_dir(), 'web')
        master_publish_dir = configuration.get_master_publish_dir(repo, config)
        atomic_publish_step = AtomicDirectoryPublishStep(
            self.get_working_dir(),
            [('web', publish_dir), (app_file, app_publish_location)],
            master_publish_dir,
            step_type=constants.PUBLISH_STEP_OVER_HTTP)
        atomic_publish_step.description = _('Making files available via web.')

        self.add_child(PublishImagesStep())
        self.add_child(atomic_publish_step)


class ExportPublisher(PluginStep):
    """
    Docker Export publisher class that is responsible for the actual publishing
    of a docker repository via a tar file
    """

    def __init__(self, repo, publish_conduit, config):
        """
        :param repo: Pulp managed Docker repository
        :type repo: pulp.server.db.model.Repository
        :param publish_conduit: Conduit providing access to relative Pulp functionality
        :type publish_conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
        :param config: Pulp configuration for the distributor
        :type config: pulp.plugins.config.PluginCallConfiguration
        """
        super(ExportPublisher, self).__init__(constants.PUBLISH_STEP_EXPORT_PUBLISHER,
                                              repo, publish_conduit, config)

        self.add_child(PublishImagesStep())
        tar_file = configuration.get_export_repo_file_with_path(repo, config)
        self.add_child(SaveTarFilePublishStep(self.get_working_dir(), tar_file))


class PublishImagesStep(PluginStep):
    """
    Publish Images
    """

    def __init__(self):
        super(PublishImagesStep, self).__init__(constants.PUBLISH_STEP_IMAGES)
        self.context = None
        self.redirect_context = None
        self.description = _('Publishing Image Files.')

    def initialize(self):
        """
        Initialize the metadata contexts
        """
        self.redirect_context = RedirectFileContext(self.get_working_dir(),
                                                    self.get_conduit(),
                                                    self.parent.config,
                                                    self.get_repo())
        self.redirect_context.initialize()

    def get_total(self):
        """
        Get the total number of images to publish. The value returned should
        not change during the processing of the step.

        :returns: The total number of images to publish
        :rtype: int
        """
        repo = self.get_repo()
        return repo.content_unit_counts.get(constants.IMAGE_TYPE_ID, 0)

    def get_iterator(self):
        """
        This method returns a generator to loop over items.
        The items created by this generator will be iterated over by the
        process_main method.

        :return: a list or other iterable
        :rtype: iterator of pulp_docker.plugins.db.models.DockerImage
        """
        return repo_controller.find_repo_content_units(
            self.get_repo(),
            repo_content_unit_q=Q(unit_type_id=constants.IMAGE_TYPE_ID),
            yield_content_unit=True)

    def process_main(self, item=None):
        """
        Link the unit to the image content directory and the package_dir

        :param item: The unit to process
        :type item: pulp_docker.plugins.db.models.DockerImage
        """
        self.redirect_context.add_unit_metadata(item)
        target_base = os.path.join(self.get_web_directory(), item.image_id)
        files = ['ancestry', 'json', 'layer']
        for file_name in files:
            plugin_utils.create_symlink(os.path.join(item.storage_path, file_name),
                                        os.path.join(target_base, file_name))

    def finalize(self):
        """
        Close and finalize the metadata context
        """
        if self.redirect_context:
            self.redirect_context.finalize()

    def get_web_directory(self):
        """
        Get the directory where the files published to the web have been linked
        """
        return os.path.join(self.get_working_dir(), 'web')
repo_name: windedge/odoomrp-wip
ref: refs/heads/8.0
path: account_treasury_forecast_banking/models/__init__.py
copies: 124
content:

# -*- encoding: utf-8 -*-
##############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import account_treasury_forecast_template
from . import account_treasury_forecast
repo_name: OSSHealth/ghdata
ref: refs/heads/master
path: conftest.py
copies: 1
content:

#SPDX-License-Identifier: MIT
import pytest
import re

from augur.application import Application
from augur.cli.backend import initialize_components

default_repo_id = "25430"
default_repo_group_id = "10"


def create_full_routes(routes):
    full_routes = []
    for route in routes:
        route = re.sub("<default_repo_id>", default_repo_id, route)
        route = re.sub("<default_repo_group_id>", default_repo_group_id, route)
        route = "http://localhost:5000/api/unstable/" + route
        full_routes.append(route)
    return full_routes


@pytest.fixture(scope="session")
def augur_app():
    augur_app = Application(disable_logs=True)
    return augur_app


@pytest.fixture(scope="session")
def metrics(augur_app):
    return augur_app.metrics


@pytest.fixture(scope="session")
def client(augur_app):
    flask_client = initialize_components(augur_app, disable_housekeeper=True).load()
    return flask_client.test_client()
repo_name: aerophile/django
ref: refs/heads/master
path: tests/staticfiles_tests/tests.py
copies: 19
content:

# -*- encoding: utf-8 -*-
from __future__ import unicode_literals

import codecs
import os
import posixpath
import shutil
import sys
import tempfile
import unittest

from django.conf import settings
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands import collectstatic
from django.contrib.staticfiles.management.commands.collectstatic import \
    Command as CollectstaticCommand
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.utils import six
from django.utils._os import symlinks_supported, upath
from django.utils.encoding import force_text
from django.utils.functional import empty

from .storage import DummyStorage

TEST_ROOT = os.path.dirname(upath(__file__))
TESTFILES_PATH = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test')

TEST_SETTINGS = {
    'DEBUG': True,
    'MEDIA_URL': '/media/',
    'STATIC_URL': '/static/',
    'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
    'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
    'STATICFILES_DIRS': [
        os.path.join(TEST_ROOT, 'project', 'documents'),
        ('prefix', os.path.join(TEST_ROOT, 'project', 'prefixed')),
    ],
    'STATICFILES_FINDERS': [
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
        'django.contrib.staticfiles.finders.DefaultStorageFinder',
    ],
    'INSTALLED_APPS': [
        'django.contrib.staticfiles',
        'staticfiles_tests',
        'staticfiles_tests.apps.test',
        'staticfiles_tests.apps.no_label',
    ],
}


class BaseStaticFilesTestCase(object):
    """
    Test case with a couple utility assertions.
    """
    def assertFileContains(self, filepath, text):
        self.assertIn(text, self._get_file(force_text(filepath)),
                      "'%s' not in '%s'" % (text, filepath))

    def assertFileNotFound(self, filepath):
        self.assertRaises(IOError, self._get_file, filepath)

    def render_template(self, template, **kwargs):
        if isinstance(template, six.string_types):
            template = Template(template)
        return template.render(Context(kwargs)).strip()

    def static_template_snippet(self, path, asvar=False):
        if asvar:
            return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
        return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path

    def assertStaticRenders(self, path, result, asvar=False, **kwargs):
        template = self.static_template_snippet(path, asvar)
        self.assertEqual(self.render_template(template, **kwargs), result)

    def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
        self.assertRaises(exc, self.assertStaticRenders, path, result, **kwargs)


@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesTestCase, TestCase):
    pass


class BaseCollectionTestCase(BaseStaticFilesTestCase):
    """
    Tests shared by all file finding features (collectstatic,
    findstatic, and static serve view).

    This relies on the asserts defined in BaseStaticFilesTestCase, but
    is separated because some test cases need those asserts without
    all these tests.
    """
    def setUp(self):
        super(BaseCollectionTestCase, self).setUp()
        temp_dir = tempfile.mkdtemp()
        # Override the STATIC_ROOT for all tests from setUp to tearDown
        # rather than as a context manager
        self.patched_settings = self.settings(STATIC_ROOT=temp_dir)
        self.patched_settings.enable()
        self.run_collectstatic()
        # Same comment as in runtests.teardown.
        self.addCleanup(shutil.rmtree, six.text_type(temp_dir))

    def tearDown(self):
        self.patched_settings.disable()
        super(BaseCollectionTestCase, self).tearDown()

    def run_collectstatic(self, **kwargs):
        call_command('collectstatic', interactive=False, verbosity=0,
                     ignore_patterns=['*.ignoreme'], **kwargs)

    def _get_file(self, filepath):
        assert filepath, 'filepath is empty.'
        filepath = os.path.join(settings.STATIC_ROOT, filepath)
        with codecs.open(filepath, "r", "utf-8") as f:
            return f.read()


class CollectionTestCase(BaseCollectionTestCase, StaticFilesTestCase):
    pass


class TestDefaults(object):
    """
    A few standard test cases.
    """
    def test_staticfiles_dirs(self):
        """
        Can find a file in a STATICFILES_DIRS directory.
        """
        self.assertFileContains('test.txt', 'Can we find')
        self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')

    def test_staticfiles_dirs_subdir(self):
        """
        Can find a file in a subdirectory of a STATICFILES_DIRS directory.
        """
        self.assertFileContains('subdir/test.txt', 'Can we find')

    def test_staticfiles_dirs_priority(self):
        """
        File in STATICFILES_DIRS has priority over file in app.
        """
        self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')

    def test_app_files(self):
        """
        Can find a file in an app static/ directory.
        """
        self.assertFileContains('test/file1.txt', 'file1 in the app dir')

    def test_nonascii_filenames(self):
        """
        Can find a file with non-ASCII character in an app static/ directory.
        """
        self.assertFileContains('test/⊗.txt', '⊗ in the app dir')

    def test_camelcase_filenames(self):
        """
        Can find a file with capital letters.
        """
        self.assertFileContains('test/camelCase.txt', 'camelCase')


class TestFindStatic(CollectionTestCase, TestDefaults):
    """
    Test ``findstatic`` management command.
    """
    def _get_file(self, filepath):
        out = six.StringIO()
        call_command('findstatic', filepath, all=False, verbosity=0, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        with codecs.open(force_text(lines[0].strip()), "r", "utf-8") as f:
            return f.read()

    def test_all_files(self):
        """
        Test that findstatic returns all candidate files if run without --first and -v1.
        """
        out = six.StringIO()
        call_command('findstatic', 'test/file.txt', verbosity=1, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        self.assertEqual(len(lines), 3)  # three because there is also the "Found <file> here" line
        self.assertIn('project', force_text(lines[1]))
        self.assertIn('apps', force_text(lines[2]))

    def test_all_files_less_verbose(self):
        """
        Test that findstatic returns all candidate files if run without --first and -v0.
        """
        out = six.StringIO()
        call_command('findstatic', 'test/file.txt', verbosity=0, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        self.assertEqual(len(lines), 2)
        self.assertIn('project', force_text(lines[0]))
        self.assertIn('apps', force_text(lines[1]))

    def test_all_files_more_verbose(self):
        """
        Test that findstatic returns all candidate files if run without --first and -v2.
        Also, test that findstatic returns the searched locations with -v2.
        """
        out = six.StringIO()
        call_command('findstatic', 'test/file.txt', verbosity=2, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        self.assertIn('project', force_text(lines[1]))
        self.assertIn('apps', force_text(lines[2]))
        self.assertIn("Looking in the following locations:", force_text(lines[3]))
        searched_locations = ', '.join(force_text(x) for x in lines[4:])
        # AppDirectoriesFinder searched locations
        self.assertIn(os.path.join('staticfiles_tests', 'apps', 'test', 'static'),
                      searched_locations)
        self.assertIn(os.path.join('staticfiles_tests', 'apps', 'no_label', 'static'),
                      searched_locations)
        # FileSystemFinder searched locations
        self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][1][1], searched_locations)
        self.assertIn(TEST_SETTINGS['STATICFILES_DIRS'][0], searched_locations)
        # DefaultStorageFinder searched locations
        self.assertIn(os.path.join('staticfiles_tests', 'project', 'site_media', 'media'),
                      searched_locations)


class TestConfiguration(StaticFilesTestCase):
    def test_location_empty(self):
        err = six.StringIO()
        for root in ['', None]:
            with override_settings(STATIC_ROOT=root):
                with six.assertRaisesRegex(
                        self, ImproperlyConfigured,
                        'without having set the STATIC_ROOT setting to a filesystem path'):
                    call_command('collectstatic', interactive=False,
                                 verbosity=0, stderr=err)

    def test_local_storage_detection_helper(self):
        staticfiles_storage = storage.staticfiles_storage
        try:
            storage.staticfiles_storage._wrapped = empty
            with override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage'):
                command = collectstatic.Command()
                self.assertTrue(command.is_local_storage())

            storage.staticfiles_storage._wrapped = empty
            with override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage'):
                command = collectstatic.Command()
                self.assertFalse(command.is_local_storage())

            collectstatic.staticfiles_storage = storage.FileSystemStorage()
            command = collectstatic.Command()
            self.assertTrue(command.is_local_storage())

            collectstatic.staticfiles_storage = DummyStorage()
            command = collectstatic.Command()
            self.assertFalse(command.is_local_storage())
        finally:
            staticfiles_storage._wrapped = empty
            collectstatic.staticfiles_storage = staticfiles_storage
            storage.staticfiles_storage = staticfiles_storage


class TestCollection(CollectionTestCase, TestDefaults):
    """
    Test ``collectstatic`` management command.
    """
    def test_ignore(self):
        """
        Test that -i patterns are ignored.
        """
        self.assertFileNotFound('test/test.ignoreme')

    def test_common_ignore_patterns(self):
        """
        Common ignore patterns (*~, .*, CVS) are ignored.
        """
        self.assertFileNotFound('test/.hidden')
        self.assertFileNotFound('test/backup~')
        self.assertFileNotFound('test/CVS')


class TestCollectionClear(CollectionTestCase):
    """
    Test the ``--clear`` option of the ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        clear_filepath = os.path.join(settings.STATIC_ROOT, 'cleared.txt')
        with open(clear_filepath, 'w') as f:
            f.write('should be cleared')
        super(TestCollectionClear, self).run_collectstatic(clear=True)

    def test_cleared_not_found(self):
        self.assertFileNotFound('cleared.txt')

    def test_dir_not_exists(self, **kwargs):
        shutil.rmtree(six.text_type(settings.STATIC_ROOT))
        super(TestCollectionClear, self).run_collectstatic(clear=True)


class TestCollectionExcludeNoDefaultIgnore(CollectionTestCase, TestDefaults):
    """
    Test ``--exclude-dirs`` and ``--no-default-ignore`` options of the
    ``collectstatic`` management command.
    """
    def run_collectstatic(self):
        super(TestCollectionExcludeNoDefaultIgnore, self).run_collectstatic(
            use_default_ignore_patterns=False)

    def test_no_common_ignore_patterns(self):
        """
        With --no-default-ignore, common ignore patterns (*~, .*, CVS)
        are not ignored.
        """
        self.assertFileContains('test/.hidden', 'should be ignored')
        self.assertFileContains('test/backup~', 'should be ignored')
        self.assertFileContains('test/CVS', 'should be ignored')


class TestNoFilesCreated(object):

    def test_no_files_created(self):
        """
        Make sure no files were created in the destination directory.
        """
        self.assertEqual(os.listdir(settings.STATIC_ROOT), [])


class TestCollectionDryRun(CollectionTestCase, TestNoFilesCreated):
    """
    Test ``--dry-run`` option for ``collectstatic`` management command.
    """
    def run_collectstatic(self):
        super(TestCollectionDryRun, self).run_collectstatic(dry_run=True)


class TestCollectionFilesOverride(CollectionTestCase):
    """
    Test overriding duplicated files by ``collectstatic`` management command.
    Check for proper handling of apps order in installed apps even if file
    modification dates are in different order:
        'staticfiles_tests.apps.test',
        'staticfiles_tests.apps.no_label',
    """
    def setUp(self):
        self.orig_path = os.path.join(TEST_ROOT, 'apps', 'no_label', 'static', 'file2.txt')
        # get modification and access times for no_label/static/file2.txt
        self.orig_mtime = os.path.getmtime(self.orig_path)
        self.orig_atime = os.path.getatime(self.orig_path)

        # prepare duplicate of file2.txt from no_label app
        # this file will have modification time older than no_label/static/file2.txt
        # anyway it should be taken to STATIC_ROOT because 'test' app is before
        # 'no_label' app in installed apps
        self.testfile_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'file2.txt')
        with open(self.testfile_path, 'w+') as f:
            f.write('duplicate of file2.txt')
        os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))
        super(TestCollectionFilesOverride, self).setUp()

    def tearDown(self):
        if os.path.exists(self.testfile_path):
            os.unlink(self.testfile_path)
        # set back original modification time
        os.utime(self.orig_path, (self.orig_atime, self.orig_mtime))
        super(TestCollectionFilesOverride, self).tearDown()

    def test_ordering_override(self):
        """
        Test if collectstatic takes files in proper order
        """
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')

        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')

        # and now change modification time of no_label/static/file2.txt
        # test app is first in installed apps so file2.txt should remain unmodified
        mtime = os.path.getmtime(self.testfile_path)
        atime = os.path.getatime(self.testfile_path)
        os.utime(self.orig_path, (mtime + 1, atime + 1))

        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')


@override_settings(
    STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage',
)
class TestCollectionNonLocalStorage(CollectionTestCase, TestNoFilesCreated):
    """
    Tests for #15035
    """
    pass


def hashed_file_path(test, path):
    fullpath = test.render_template(test.static_template_snippet(path))
    return fullpath.replace(settings.STATIC_URL, '')


class TestHashedFiles(object):
    hashed_file_path = hashed_file_path

    def tearDown(self):
        # Clear hashed files to avoid side effects among tests.
        storage.staticfiles_storage.hashed_files.clear()

    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        self.assertStaticRaises(ValueError, "does/not/exist.png",
                                "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt")
        self.assertStaticRenders("test/file.txt", "/static/test/file.dad0999e4f8f.txt", asvar=True)
        self.assertStaticRenders("cached/styles.css", "/static/cached/styles.bb84a0240107.css")
        self.assertStaticRenders("path/", "/static/path/")
        self.assertStaticRenders("path/?query", "/static/path/?query")

    def test_template_tag_simple_content(self):
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.bb84a0240107.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_ignored_completely(self):
        relpath = self.hashed_file_path("cached/css/ignored.css")
        self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'#foobar', content)
            self.assertIn(b'http:foobar', content)
            self.assertIn(b'https:foobar', content)
            self.assertIn(b'data:foobar', content)
            self.assertIn(b'//foobar', content)

    def test_path_with_querystring(self):
        relpath = self.hashed_file_path("cached/styles.css?spam=eggs")
        self.assertEqual(relpath, "cached/styles.bb84a0240107.css?spam=eggs")
        with storage.staticfiles_storage.open(
                "cached/styles.bb84a0240107.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_with_fragment(self):
        relpath = self.hashed_file_path("cached/styles.css#eggs")
        self.assertEqual(relpath, "cached/styles.bb84a0240107.css#eggs")
        with storage.staticfiles_storage.open(
                "cached/styles.bb84a0240107.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_with_querystring_and_fragment(self):
        relpath = self.hashed_file_path("cached/css/fragments.css")
        self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
            self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
            self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
            self.assertIn(b'#default#VML', content)

    def test_template_tag_absolute(self):
        relpath = self.hashed_file_path("cached/absolute.css")
        self.assertEqual(relpath, "cached/absolute.ae9ef2716fe3.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"/static/cached/styles.css", content)
            self.assertIn(b"/static/cached/styles.bb84a0240107.css", content)
            self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)

    def test_template_tag_denorm(self):
        relpath = self.hashed_file_path("cached/denorm.css")
        self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"..//cached///styles.css", content)
            self.assertIn(b"../cached/styles.bb84a0240107.css", content)
            self.assertNotIn(b"url(img/relative.png )", content)
            self.assertIn(b'url("img/relative.acae32e4532b.png', content)

    def test_template_tag_relative(self):
        relpath = self.hashed_file_path("cached/relative.css")
        self.assertEqual(relpath, "cached/relative.b0375bd89156.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"../cached/styles.css", content)
            self.assertNotIn(b'@import "styles.css"', content)
            self.assertNotIn(b'url(img/relative.png)', content)
            self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
            self.assertIn(b"../cached/styles.bb84a0240107.css", content)

    def test_import_replacement(self):
        "See #18050"
        relpath = self.hashed_file_path("cached/import.css")
        self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"""import url("styles.bb84a0240107.css")""", relfile.read())

    def test_template_tag_deep_relative(self):
        relpath = self.hashed_file_path("cached/css/window.css")
        self.assertEqual(relpath, "cached/css/window.3906afbb5a17.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b'url(img/window.png)', content)
            self.assertIn(b'url("img/window.acae32e4532b.png")', content)

    def test_template_tag_url(self):
        relpath = self.hashed_file_path("cached/url.css")
        self.assertEqual(relpath, "cached/url.902310b73412.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"https://", relfile.read())

    def test_post_processing(self):
        """
        Test that post_processing behaves correctly.

        Files that are alterable should always be post-processed; files that
        aren't should be skipped.

        collectstatic has already been called once in setUp() for this
        testcase, therefore we check by verifying behavior on a second run.
        """
        collectstatic_args = {
            'interactive': False,
            'verbosity': 0,
            'link': False,
            'clear': False,
            'dry_run': False,
            'post_process': True,
            'use_default_ignore_patterns': True,
            'ignore_patterns': ['*.ignoreme'],
        }

        collectstatic_cmd = CollectstaticCommand()
        collectstatic_cmd.set_options(**collectstatic_args)
        stats = collectstatic_cmd.collect()
        self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
        self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
        self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])

    def test_css_import_case_insensitive(self):
        relpath = self.hashed_file_path("cached/styles_insensitive.css")
        self.assertEqual(relpath, "cached/styles_insensitive.c609562b6d3c.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    @override_settings(
        STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'faulty')],
        STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    )
    def test_post_processing_failure(self):
        """
        Test that post_processing indicates the origin of the error when it
        fails. Regression test for #18986.
        """
        finders.get_finder.cache_clear()
        err = six.StringIO()
        with self.assertRaises(Exception):
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
        self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())


# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
    TEST_SETTINGS,
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionCachedStorage(TestHashedFiles, BaseCollectionTestCase,
                                  BaseStaticFilesTestCase, TestCase):
    """
    Tests for the Cache busting storage
    """
    def test_cache_invalidation(self):
        name = "cached/styles.css"
        hashed_name = "cached/styles.bb84a0240107.css"
        # check if the cache is filled correctly as expected
        cache_key = storage.staticfiles_storage.hash_key(name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(self.hashed_file_path(name), cached_name)
        # clearing the cache to make sure we re-set it correctly in the url method
        storage.staticfiles_storage.hashed_files.clear()
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, None)
        self.assertEqual(self.hashed_file_path(name), hashed_name)
        cached_name = storage.staticfiles_storage.hashed_files.get(cache_key)
        self.assertEqual(cached_name, hashed_name)

    def test_cache_key_memcache_validation(self):
        """
        Handle cache key creation correctly, see #17861.
        """
        name = "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/" + "\x16" + "\xb4"
        cache_key = storage.staticfiles_storage.hash_key(name)
        cache_validator = BaseCache({})
        cache_validator.validate_key(cache_key)
        self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')


# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
    TEST_SETTINGS,
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.ManifestStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionManifestStorage(TestHashedFiles, BaseCollectionTestCase,
                                    BaseStaticFilesTestCase, TestCase):
    """
    Tests for the Cache busting storage
    """
    def setUp(self):
        super(TestCollectionManifestStorage, self).setUp()

        self._clear_filename = os.path.join(TESTFILES_PATH, 'cleared.txt')
        with open(self._clear_filename, 'w') as f:
            f.write('to be deleted in one test')

    def tearDown(self):
        super(TestCollectionManifestStorage, self).tearDown()
        if os.path.exists(self._clear_filename):
            os.unlink(self._clear_filename)

    def test_manifest_exists(self):
        filename = storage.staticfiles_storage.manifest_name
        path = storage.staticfiles_storage.path(filename)
        self.assertTrue(os.path.exists(path))

    def test_loaded_cache(self):
        self.assertNotEqual(storage.staticfiles_storage.hashed_files, {})
        manifest_content = storage.staticfiles_storage.read_manifest()
        self.assertIn('"version": "%s"' %
                      storage.staticfiles_storage.manifest_version,
                      force_text(manifest_content))

    def test_parse_cache(self):
        hashed_files = storage.staticfiles_storage.hashed_files
        manifest = storage.staticfiles_storage.load_manifest()
        self.assertEqual(hashed_files, manifest)

    def test_clear_empties_manifest(self):
        cleared_file_name = os.path.join('test', 'cleared.txt')
        # collect the additional file
        self.run_collectstatic()

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertIn(cleared_file_name, manifest_content)

        original_path = storage.staticfiles_storage.path(cleared_file_name)
        self.assertTrue(os.path.exists(original_path))

        # delete the original file from the app, collect with clear
        os.unlink(self._clear_filename)
        self.run_collectstatic(clear=True)

        self.assertFileNotFound(original_path)

        hashed_files = storage.staticfiles_storage.hashed_files
        self.assertNotIn(cleared_file_name, hashed_files)

        manifest_content = storage.staticfiles_storage.load_manifest()
        self.assertNotIn(cleared_file_name, manifest_content)


# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(
    TEST_SETTINGS,
    STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
                                        BaseStaticFilesTestCase, TestCase):
    """
    Tests for the Cache busting storage
    """
    hashed_file_path = hashed_file_path

    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        self.assertStaticRaises(ValueError, "does/not/exist.png",
                                "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt", "/static/test/file.deploy12345.txt")
        self.assertStaticRenders("cached/styles.css", "/static/cached/styles.deploy12345.css")
        self.assertStaticRenders("path/", "/static/path/")
        self.assertStaticRenders("path/?query", "/static/path/?query")

    def test_template_tag_simple_content(self):
        relpath = self.hashed_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.deploy12345.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.deploy12345.css", content)


@unittest.skipUnless(symlinks_supported(),
                     "Must be able to symlink to run this test.")
class TestCollectionLinks(CollectionTestCase, TestDefaults):
    """
    Test ``--link`` option for ``collectstatic`` management command.

    Note that by inheriting ``TestDefaults`` we repeat all
    the standard file resolving tests here, to make sure using
    ``--link`` does not change the file-selection semantics.
    """
    def run_collectstatic(self):
        super(TestCollectionLinks, self).run_collectstatic(link=True)

    def test_links_created(self):
        """
        With ``--link``, symbolic links are created.
        """
        self.assertTrue(os.path.islink(os.path.join(settings.STATIC_ROOT, 'test.txt')))

    def test_broken_symlink(self):
        """
        Test broken symlink gets deleted.
        """
        path = os.path.join(settings.STATIC_ROOT, 'test.txt')
        os.unlink(path)
        self.run_collectstatic()
        self.assertTrue(os.path.islink(path))


@override_settings(ROOT_URLCONF='staticfiles_tests.urls.default')
class TestServeStatic(StaticFilesTestCase):
    """
    Test static asset serving view.
    """
    def _response(self, filepath):
        return self.client.get(
            posixpath.join(settings.STATIC_URL, filepath))

    def assertFileContains(self, filepath, text):
        self.assertContains(self._response(filepath), text)

    def assertFileNotFound(self, filepath):
        self.assertEqual(self._response(filepath).status_code, 404)


@override_settings(DEBUG=False)
class TestServeDisabled(TestServeStatic):
    """
    Test serving static files disabled when DEBUG is False.
    """
    def test_disabled_serving(self):
        self.assertFileNotFound('test.txt')


class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
    """
    Test static asset serving view with manually configured URLconf.
    """
    pass


@override_settings(ROOT_URLCONF='staticfiles_tests.urls.helper')
class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
    """
    Test static asset serving view with staticfiles_urlpatterns helper.
    """


class FinderTestCase(object):
    """
    Base finder test mixin.

    On Windows, sometimes the case of the path we ask the finders for and the
    path(s) they find can differ. Compare them using os.path.normcase() to
    avoid false negatives.
    """
    def test_find_first(self):
        src, dst = self.find_first
        found = self.finder.find(src)
        self.assertEqual(os.path.normcase(found), os.path.normcase(dst))

    def test_find_all(self):
        src, dst = self.find_all
        found = self.finder.find(src, all=True)
        found = [os.path.normcase(f) for f in found]
        dst = [os.path.normcase(d) for d in dst]
        self.assertEqual(found, dst)


class TestFileSystemFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test FileSystemFinder.
    """
    def setUp(self):
        super(TestFileSystemFinder, self).setUp()
        self.finder = finders.FileSystemFinder()
        test_file_path = os.path.join(TEST_ROOT, 'project', 'documents', 'test', 'file.txt')
        self.find_first = (os.path.join('test', 'file.txt'), test_file_path)
        self.find_all = (os.path.join('test', 'file.txt'), [test_file_path])


class TestAppDirectoriesFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test AppDirectoriesFinder.
    """
    def setUp(self):
        super(TestAppDirectoriesFinder, self).setUp()
        self.finder = finders.AppDirectoriesFinder()
        test_file_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test', 'file1.txt')
        self.find_first = (os.path.join('test', 'file1.txt'), test_file_path)
        self.find_all = (os.path.join('test', 'file1.txt'), [test_file_path])


class TestDefaultStorageFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test DefaultStorageFinder.
    """
    def setUp(self):
        super(TestDefaultStorageFinder, self).setUp()
        self.finder = finders.DefaultStorageFinder(
            storage=storage.StaticFilesStorage(location=settings.MEDIA_ROOT))
        test_file_path = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
        self.find_first = ('media-file.txt', test_file_path)
        self.find_all = ('media-file.txt', [test_file_path])


@override_settings(
    STATICFILES_FINDERS=['django.contrib.staticfiles.finders.FileSystemFinder'],
    STATICFILES_DIRS=[os.path.join(TEST_ROOT, 'project', 'documents')],
)
class TestMiscFinder(TestCase):
    """
    A few misc finder tests.
    """
    def test_get_finder(self):
        self.assertIsInstance(finders.get_finder(
            'django.contrib.staticfiles.finders.FileSystemFinder'),
            finders.FileSystemFinder)

    def test_get_finder_bad_classname(self):
        self.assertRaises(ImportError, finders.get_finder,
                          'django.contrib.staticfiles.finders.FooBarFinder')

    def test_get_finder_bad_module(self):
        self.assertRaises(ImportError,
                          finders.get_finder, 'foo.bar.FooBarFinder')

    def test_cache(self):
        finders.get_finder.cache_clear()
        for n in range(10):
            finders.get_finder(
                'django.contrib.staticfiles.finders.FileSystemFinder')
        cache_info = finders.get_finder.cache_info()
        self.assertEqual(cache_info.hits, 9)
        self.assertEqual(cache_info.currsize, 1)

    def test_searched_locations(self):
        finders.find('spam')
        self.assertEqual(finders.searched_locations,
                         [os.path.join(TEST_ROOT, 'project', 'documents')])

    @override_settings(STATICFILES_DIRS='a string')
    def test_non_tuple_raises_exception(self):
        """
        We can't determine if STATICFILES_DIRS is set correctly just by
        looking at the type, but we can determine if it's definitely wrong.
        """
        self.assertRaises(ImproperlyConfigured, finders.FileSystemFinder)

    @override_settings(MEDIA_ROOT='')
    def test_location_empty(self):
        self.assertRaises(ImproperlyConfigured, finders.DefaultStorageFinder)


class TestTemplateTag(StaticFilesTestCase):

    def test_template_tag(self):
        self.assertStaticRenders("does/not/exist.png", "/static/does/not/exist.png")
        self.assertStaticRenders("testfile.txt", "/static/testfile.txt")


class CustomStaticFilesStorage(storage.StaticFilesStorage):
    """
    Used in TestStaticFilePermissions
    """
    def __init__(self, *args, **kwargs):
        kwargs['file_permissions_mode'] = 0o640
        kwargs['directory_permissions_mode'] = 0o740
        super(CustomStaticFilesStorage, self).__init__(*args, **kwargs)


@unittest.skipIf(sys.platform.startswith('win'),
                 "Windows only partially supports chmod.")
class TestStaticFilePermissions(BaseCollectionTestCase, StaticFilesTestCase):

    command_params = {'interactive': False,
                      'post_process': True,
                      'verbosity': 0,
                      'ignore_patterns': ['*.ignoreme'],
                      'use_default_ignore_patterns': True,
                      'clear': False,
                      'link': False,
                      'dry_run': False}

    def setUp(self):
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        super(TestStaticFilePermissions, self).setUp()

    def tearDown(self):
        os.umask(self.old_umask)
        super(TestStaticFilePermissions, self).tearDown()

    # Don't run collectstatic command in this test class.
    def run_collectstatic(self, **kwargs):
        pass

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o655,
                       FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_collect_static_files_permissions(self):
        collectstatic.Command().execute(**self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o655)
        self.assertEqual(dir_mode, 0o765)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None,
                       FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_collect_static_files_default_permissions(self):
        collectstatic.Command().execute(**self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o666 & ~self.umask)
        self.assertEqual(dir_mode, 0o777 & ~self.umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o655,
                       FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765,
                       STATICFILES_STORAGE='staticfiles_tests.tests.CustomStaticFilesStorage')
    def test_collect_static_files_subclass_of_static_storage(self):
        collectstatic.Command().execute(**self.command_params)
        test_file = os.path.join(settings.STATIC_ROOT, "test.txt")
        test_dir = os.path.join(settings.STATIC_ROOT, "subdir")
        file_mode = os.stat(test_file)[0] & 0o777
        dir_mode = os.stat(test_dir)[0] & 0o777
        self.assertEqual(file_mode, 0o640)
        self.assertEqual(dir_mode, 0o740)
goodwinnk/intellij-community
refs/heads/master
python/testData/resolve/FormatStringWithPackedDictAsArgument.py
38
v = "first is {<ref>fst}, second is {snd}".format(**{"fst": "f", "snd": "s"})
noikiy/mitmproxy
refs/heads/master
test/test_controller.py
33
import mock from libmproxy import controller class TestMaster: def test_default_handler(self): m = controller.Master(None) msg = mock.MagicMock() m.handle("type", msg) assert msg.reply.call_count == 1
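A hedged companion sketch, not from the repository: assuming the handle_<type> dispatch convention that the default-handler test above exercises, a Master subclass can claim a message type by defining a matching method. The class and attribute names below are illustrative.

import mock
from libmproxy import controller

class CountingMaster(controller.Master):
    # Assumed dispatch convention: handle("type", msg) looks for handle_type.
    def __init__(self):
        controller.Master.__init__(self, None)
        self.seen = 0

    def handle_type(self, msg):
        self.seen += 1
        msg.reply()  # the custom handler acks instead of the default path

m = CountingMaster()
msg = mock.MagicMock()
m.handle("type", msg)
assert m.seen == 1 and msg.reply.call_count == 1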
uclouvain/OSIS-Louvain
refs/heads/master
attribution/tests/models/test_attribution_charge_new.py
1
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## from django.test import TestCase from attribution.models import attribution_charge_new from attribution.models.enums import function from attribution.tests.factories.attribution_charge_new import AttributionChargeNewFactory from attribution.tests.factories.attribution_new import AttributionNewFactory from base.models.enums import component_type from base.tests.factories.learning_component_year import LearningComponentYearFactory from base.tests.factories.person import PersonFactory from base.tests.factories.tutor import TutorFactory class AttributionChargeNewTest(TestCase): @classmethod def setUpTestData(cls): cls.person = PersonFactory(first_name="John", last_name="Doe") cls.tutor = TutorFactory(person=cls.person) cls.attribution_new = AttributionNewFactory(tutor=cls.tutor, function=function.PROFESSOR) cls.attribution_new_without_attribution_charge = AttributionNewFactory(tutor=cls.tutor, function=function.PROFESSOR) cls.learning_component_year_lecturing = LearningComponentYearFactory(type=component_type.LECTURING) cls.learning_component_year_practical = LearningComponentYearFactory(type=component_type.PRACTICAL_EXERCISES) cls.attribution_charge_new_lecturing = \ AttributionChargeNewFactory(attribution=cls.attribution_new, learning_component_year=cls.learning_component_year_lecturing, allocation_charge=10) cls.attribution_charge_new_practical = \ AttributionChargeNewFactory(attribution=cls.attribution_new, learning_component_year=cls.learning_component_year_practical, allocation_charge=20) def test_search_with_attribution(self): result = attribution_charge_new.search(attribution=self.attribution_new) self.assertCountEqual(result, [self.attribution_charge_new_lecturing, self.attribution_charge_new_practical]) def test_search_with_learning_component_year(self): result = attribution_charge_new.search(learning_component_year=self.learning_component_year_practical) self.assertCountEqual(result, [self.attribution_charge_new_practical]) def test_search_with_learning_component_year_list(self): learning_component_year_list = [self.learning_component_year_lecturing, self.learning_component_year_practical] result = attribution_charge_new.search(learning_component_year=learning_component_year_list) self.assertCountEqual(result, [self.attribution_charge_new_practical, 
self.attribution_charge_new_lecturing]) def test_str_function(self): self.assertEqual(self.attribution_charge_new_lecturing.__str__(), "DOE, John - PROFESSOR")
kylerbrown/arfview
refs/heads/master
_arfview/rasterPlot.py
2
import pyqtgraph as pg import numpy as np # class rasterTick(pg.GraphItem): # def __init__(self, toe, trial_number): # super(rasterTick,self).__init__() # x = (toe, toe) # y = (trial_number - .5, trial_number + .5) # self.setData(x, y) class rasterPlot(pg.PlotItem): """Plots a raster plot from a list of array-like objects containing event times. All the tick marks together are a single object. This is accomplished using a pyqtgraph.GraphItem, which is used to plot graph theory graphs""" def __init__(self, toes, *args, **kwargs): super(rasterPlot, self).__init__(*args,**kwargs) # if not isinstance(toes, list) or toes.ndim > 1: # raise ValueError('Event times must be given as ndarray of dimension 2 or less') n_toes = sum(len(t) for t in toes) pos = np.zeros((n_toes*2,2)) #array of vertex positions k=0 for trial_idx, trial_toes in enumerate(toes): pos[k:k+len(trial_toes)*2,0] = trial_toes.repeat(2) pos[k:k+len(trial_toes)*2:2,1] = 1 + trial_idx - .5 pos[k+1:k+len(trial_toes)*2:2,1] = 1 + trial_idx + .5 k+=len(trial_toes)*2 adj = np.arange(n_toes*2).reshape(n_toes,2) #connections of graph self.graph_item = pg.GraphItem(pos=pos,adj=adj,size=0) self.addItem(self.graph_item) self.setMouseEnabled(y=False) # def add_trial(self, toes): # self.ntrials += 1 # for idx, t in enumerate(toes): # self.plot([t,t], [self.ntrials-.5, self.ntrials+.5])
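A minimal usage sketch for the class above, assuming a working Qt/pyqtgraph install and that rasterPlot is importable from this module: two trials of event times rendered as a single GraphItem.

# Minimal usage sketch (assumptions noted above).
import numpy as np
import pyqtgraph as pg

toes = [np.array([0.1, 0.5, 0.9]), np.array([0.2, 0.6])]  # two trials
app = pg.mkQApp()
view = pg.GraphicsView()
view.setCentralItem(rasterPlot(toes))  # class defined above
view.show()
app.exec_()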
efiring/scipy
refs/heads/master
scipy/weave/examples/vtk_example.py
100
""" A simple example to show how to use weave with VTK. This lets one create VTK objects using the standard VTK-Python API (via 'import vtk') and then accelerate any of the computations by inlining C++ code inside Python. Please note the use of the `inc_dirs` and the `lib_dirs` variables in the call to weave.inline. Point these to where your VTK headers are and where the shared libraries are. For every VTK object encountered the corresponding VTK header is automatically added to the C++ code generated. If you need to add other headers specified like so:: headers=['"vtkHeader1.h"', '"vtkHeader2.h"'] in the keyword arguments to weave.inline. Similarly, by default, vtkCommon is linked into the generated module. If you need to link to any of the other vtk libraries add something like so:: libraries=['vtkHybrid', 'vtkFiltering'] in the keyword arguments to weave.inline. For example:: weave.inline(code, ['arr', 'v_arr'], include_dirs = ['/usr/local/include/vtk'], library_dirs = ['/usr/local/lib/vtk'], headers=['"vtkHeader1.h"', '"vtkHeader2.h"'], libraries=['vtkHybrid', 'vtkFiltering']) This module has been tested to work with VTK-4.2 and VTK-4.4 under Linux. YMMV on other platforms. Author: Prabhu Ramachandran Copyright (c) 2004, Prabhu Ramachandran License: BSD Style. """ from __future__ import absolute_import, print_function import scipy.weave as weave import vtk import numpy import sys import time # Please change these to suit your needs. If not, this example will # not compile. inc_dirs = ['/usr/local/include/vtk', '/usr/include/vtk'] lib_dirs = ['/usr/local/lib/vtk', '/usr/lib/vtk'] def simple_test(): """A simple example of how you can access the methods of a VTK object created from Python in C++ using weave.inline. """ a = vtk.vtkStructuredPoints() a.SetOrigin(1.0, 1.0, 1.0) print("sys.getrefcount(a) = ", sys.getrefcount(a)) code = r""" printf("a->ClassName() == %s\n", a->GetClassName()); printf("a->GetReferenceCount() == %d\n", a->GetReferenceCount()); double *origin = a->GetOrigin(); printf("Origin = %f, %f, %f\n", origin[0], origin[1], origin[2]); """ weave.inline(code, ['a'], include_dirs=inc_dirs, library_dirs=lib_dirs) print("sys.getrefcount(a) = ", sys.getrefcount(a)) def array_test(): """Tests if a large numpy array can be copied into a vtkFloatArray rapidly by using weave.inline. """ # Create a large numpy array. arr = numpy.arange(0, 10, 0.0001, 'f') print("Number of elements in array = ", arr.shape[0]) # Copy it into a vtkFloatArray and time the process. v_arr = vtk.vtkFloatArray() ts = time.clock() for i in range(arr.shape[0]): v_arr.InsertNextValue(arr[i]) print("Time taken to do it in pure Python =", time.clock() - ts) # Now do the same thing using weave.inline v_arr = vtk.vtkFloatArray() code = """ int size = Narr[0]; for (int i=0; i<size; ++i) v_arr->InsertNextValue(arr[i]); """ ts = time.clock() # Note the use of the include_dirs and library_dirs. weave.inline(code, ['arr', 'v_arr'], include_dirs=inc_dirs, library_dirs=lib_dirs) print("Time taken to do it using Weave =", time.clock() - ts) # Test the data to make certain that we have done it right. print("Checking data.") for i in range(v_arr.GetNumberOfTuples()): val = (v_arr.GetValue(i) - arr[i]) assert (val < 1e-6), "i = %d, val= %f" % (i, val) print("OK.") if __name__ == "__main__": simple_test() array_test()
anhstudios/swganh
refs/heads/develop
data/scripts/templates/object/tangible/lair/structure/exterior/shared_lair_cave_large_exterior_kai-tok.py
2
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/lair/structure/exterior/shared_lair_cave_large_exterior_kai-tok.iff" result.attribute_template_id = -1 result.stfName("lair_n","cave_large_exterior_kaitok") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
bkerler/ida
refs/heads/master
plugins/rizzo/rizzo.py
7
########################################################################## # An IDAPython plugin that generates "fuzzy" function signatures that can # be shared and applied amongst different IDBs. # # There are multiple sets of signatures that are generated: # # o "Formal" signatures, where functions must match exactly # o "Fuzzy" signatures, where functions must only resemble each other # in terms of data/call references. # o String-based signatures, where functions are identified based on # unique string references. # o Immediate-based signatures, where functions are identified based # on immediate value references. # # These signatures are applied based on accuracy, that is, formal # signatures are applied first, then string and immediate based # signatures, and finally fuzzy signatures. # # Further, functions are identified based on call references. Consider, # for example, two functions, one named 'foo', the other named 'bar'. # The 'foo' function is fairly unique and a reliable signature is easily # generated for it, but the 'bar' function is more difficult to reliably # identify. However, 'foo' calls 'bar', and thus once 'foo' is identified, # 'bar' can also be identified by association. # # Craig Heffner # @devttys0 ########################################################################## import idc import idaapi import idautils import os import sys import time import pickle # http://natashenka.ca/pickle/ import collections class RizzoSignatures(object): ''' Simple wrapper class for storing signature info. ''' SHOW = [] def __init__(self): self.fuzzy = {} self.formal = {} self.strings = {} self.functions = {} self.immediates = {} self.fuzzydups = set() self.formaldups = set() self.stringdups = set() self.immediatedups = set() def show(self): if not self.SHOW: return print "\n\nGENERATED FORMAL SIGNATURES FOR:" for (key, ea) in self.formal.iteritems(): func = RizzoFunctionDescriptor(self.formal, self.functions, key) if func.name in self.SHOW: print func.name print "\n\nGENERATRED FUZZY SIGNATURES FOR:" for (key, ea) in self.fuzzy.iteritems(): func = RizzoFunctionDescriptor(self.fuzzy, self.functions, key) if func.name in self.SHOW: print func.name class RizzoStringDescriptor(object): ''' Wrapper class for easily accessing necessary string information. ''' def __init__(self, string): self.ea = string.ea self.value = str(string) self.xrefs = [x.frm for x in idautils.XrefsTo(self.ea)] class RizzoBlockDescriptor(object): ''' Code block info is stored in tuples, which minimize pickle storage space. This class provides more Pythonic (and sane) access to values of interest for a given block. ''' def __init__(self, block): self.formal = block[0] self.fuzzy = block[1] self.immediates = block[2] self.functions = block[3] def match(self, nblock, fuzzy=False): # TODO: Fuzzy matching at the block level gets close, but produces a higher number of # false positives; for example, it confuses hmac_md5 with hmac_sha1. #return ((self.formal == nblock.formal or (fuzzy and self.fuzzy == nblock.fuzzy)) and return (self.formal == nblock.formal and len(self.immediates) == len(nblock.immediates) and len(self.functions) == len(nblock.functions)) class RizzoFunctionDescriptor(object): ''' Function signature info is stored in dicts and tuples, which minimize pickle storage space. This class provides more Pythonic (and sane) access to values of interest for a given function. 
''' def __init__(self, signatures, functions, key): self.ea = signatures[key] self.name = functions[self.ea][0] self.blocks = functions[self.ea][1] class Rizzo(object): ''' Workhorse class which performs the primary logic and functionality. ''' DEFAULT_SIGNATURE_FILE = "rizzo.sig" def __init__(self, sigfile=None): if sigfile: self.sigfile = sigfile else: self.sigfile = self.DEFAULT_SIGNATURE_FILE # Useful for quickly identifying string xrefs from individual instructions self.strings = {} for string in idautils.Strings(): self.strings[string.ea] = RizzoStringDescriptor(string) start = time.time() self.signatures = self.generate() end = time.time() print "Generated %d formal signatures and %d fuzzy signatures for %d functions in %.2f seconds." % (len(self.signatures.formal), len(self.signatures.fuzzy), len(self.signatures.functions), (end-start)) def save(self): print ("Saving signatures to %s..." % self.sigfile), fp = open(self.sigfile, "wb") pickle.dump(self.signatures, fp) fp.close() print "done." def load(self): print ("Loading signatures from %s..." % self.sigfile), fp = open(self.sigfile, "rb") sigs = pickle.load(fp) fp.close() print "done." return sigs def sighash(self, value): return hash(str(value)) & 0xFFFFFFFF def block(self, block): ''' Returns a tuple: ([formal, block, signatures], [fuzzy, block, signatures], set([unique, immediate, values]), [called, function, names]) ''' formal = [] fuzzy = [] functions = [] immediates = [] ea = block.startEA while ea < block.endEA: idaapi.decode_insn(ea) # Get a list of all data/code references from the current instruction drefs = [x for x in idautils.DataRefsFrom(ea)] crefs = [x for x in idautils.CodeRefsFrom(ea, False)] # Add all instruction mnemonics to the formal block hash formal.append(idc.GetMnem(ea)) # If this is a call instruction, be sure to note the name of the function # being called. This is used to apply call-based signatures to functions. # # For fuzzy signatures, we can't use the actual name or EA of the function, # but rather just want to note that a function call was made. # # Formal signatures already have the call instruction mnemonic, which is more # specific than just saying that a call was made. if idaapi.is_call_insn(ea): for cref in crefs: func_name = idc.Name(cref) if func_name: functions.append(func_name) fuzzy.append("funcref") # If there are data references from the instruction, check to see if any of them # are strings. These are looked up in the pre-generated strings dictionary. # # String values are easily identifiable, and are used as part of both the fuzzy # and the formal signatures. # # It is more difficult to determine if non-string values are constants or not; # for both fuzzy and formal signatures, just use "data" to indicate that some data # was referenced. elif drefs: for dref in drefs: if self.strings.has_key(dref): formal.append(self.strings[dref].value) fuzzy.append(self.strings[dref].value) else: formal.append("dataref") fuzzy.append("dataref") # If there are no data or code references from the instruction, use every operand as # part of the formal signature. # # Fuzzy signatures are only concerned with interesting immediate values, that is, values # that are greater than 65,535, are not memory addresses, and are not displayed as # negative values. 
elif not drefs and not crefs: for n in range(0, len(idaapi.cmd.Operands)): opnd_text = idc.GetOpnd(ea, n) formal.append(opnd_text) if idaapi.cmd.Operands[n].type == idaapi.o_imm and not opnd_text.startswith('-'): if idaapi.cmd.Operands[n].value >= 0xFFFF: if idaapi.getFlags(idaapi.cmd.Operands[n].value) == 0: fuzzy.append(str(idaapi.cmd.Operands[n].value)) immediates.append(idaapi.cmd.Operands[n].value) ea = idc.NextHead(ea) return (self.sighash(''.join(formal)), self.sighash(''.join(fuzzy)), immediates, functions) def function(self, func): ''' Returns a list of blocks. ''' blocks = [] for block in idaapi.FlowChart(func): blocks.append(self.block(block)) return blocks def generate(self): signatures = RizzoSignatures() # Generate unique string-based function signatures for (ea, string) in self.strings.iteritems(): # Only generate signatures on reasonably long strings with one xref if len(string.value) >= 8 and len(string.xrefs) == 1: func = idaapi.get_func(string.xrefs[0]) if func: strhash = self.sighash(string.value) # Check for and remove string duplicate signatures (the same # string can appear more than once in an IDB). # If no duplicates, add this to the string signature dict. if signatures.strings.has_key(strhash): del signatures.strings[strhash] signatures.stringdups.add(strhash) elif strhash not in signatures.stringdups: signatures.strings[strhash] = func.startEA # Generate formal, fuzzy, and immediate-based function signatures for ea in idautils.Functions(): func = idaapi.get_func(ea) if func: # Generate a signature for each block in this function blocks = self.function(func) # Build function-wide formal and fuzzy signatures by simply # concatenating the individual function block signatures. formal = self.sighash(''.join([str(e) for (e, f, i, c) in blocks])) fuzzy = self.sighash(''.join([str(f) for (e, f, i, c) in blocks])) # Add this signature to the function dictionary. signatures.functions[func.startEA] = (idc.Name(func.startEA), blocks) # Check for and remove formal duplicate signatures. # If no duplicates, add this to the formal signature dict. if signatures.formal.has_key(formal): del signatures.formal[formal] signatures.formaldups.add(formal) elif formal not in signatures.formaldups: signatures.formal[formal] = func.startEA # Check for and remove fuzzy duplicate signatures. # If no duplicates, add this to the fuzzy signature dict. if signatures.fuzzy.has_key(fuzzy): del signatures.fuzzy[fuzzy] signatures.fuzzydups.add(fuzzy) elif fuzzy not in signatures.fuzzydups: signatures.fuzzy[fuzzy] = func.startEA # Check for and remove immediate duplicate signatures. # If no duplicates, add this to the immediate signature dict. for (e, f, immediates, c) in blocks: for immediate in immediates: if signatures.immediates.has_key(immediate): del signatures.immediates[immediate] signatures.immediatedups.add(immediate) elif immediate not in signatures.immediatedups: signatures.immediates[immediate] = func.startEA # These need not be maintained across function calls, # and only add to the size of the saved signature file. 
signatures.fuzzydups = set() signatures.formaldups = set() signatures.stringdups = set() signatures.immediatedups = set() # DEBUG signatures.show() return signatures def match(self, extsigs): fuzzy = {} formal = {} strings = {} immediates = {} # Match formal function signatures start = time.time() for (extsig, ext_func_ea) in extsigs.formal.iteritems(): if self.signatures.formal.has_key(extsig): newfunc = RizzoFunctionDescriptor(extsigs.formal, extsigs.functions, extsig) curfunc = RizzoFunctionDescriptor(self.signatures.formal, self.signatures.functions, extsig) formal[curfunc] = newfunc end = time.time() print "Found %d formal matches in %.2f seconds." % (len(formal), (end-start)) # Match fuzzy function signatures start = time.time() for (extsig, ext_func_ea) in extsigs.fuzzy.iteritems(): if self.signatures.fuzzy.has_key(extsig): curfunc = RizzoFunctionDescriptor(self.signatures.fuzzy, self.signatures.functions, extsig) newfunc = RizzoFunctionDescriptor(extsigs.fuzzy, extsigs.functions, extsig) # Only accept this as a valid match if the functions have the same number of basic code blocks if len(curfunc.blocks) == len(newfunc.blocks): fuzzy[curfunc] = newfunc end = time.time() print "Found %d fuzzy matches in %.2f seconds." % (len(fuzzy), (end-start)) # Match string based function signatures start = time.time() for (extsig, ext_func_ea) in extsigs.strings.iteritems(): if self.signatures.strings.has_key(extsig): curfunc = RizzoFunctionDescriptor(self.signatures.strings, self.signatures.functions, extsig) newfunc = RizzoFunctionDescriptor(extsigs.strings, extsigs.functions, extsig) strings[curfunc] = newfunc end = time.time() print "Found %d string matches in %.2f seconds." % (len(strings), (end-start)) # Match immediate baesd function signatures start = time.time() for (extsig, ext_func_ea) in extsigs.immediates.iteritems(): if self.signatures.immediates.has_key(extsig): curfunc = RizzoFunctionDescriptor(self.signatures.immediates, self.signatures.functions, extsig) newfunc = RizzoFunctionDescriptor(extsigs.immediates, extsigs.functions, extsig) immediates[curfunc] = newfunc end = time.time() print "Found %d immediate matches in %.2f seconds." % (len(immediates), (end-start)) # Return signature matches in the order we want them applied # The second tuple of each match is set to True if it is a fuzzy match, e.g.: # # ((match, fuzzy), (match, fuzzy), ...) return ((formal, False), (strings, False), (immediates, False), (fuzzy, True)) def rename(self, ea, name): # Don't rely on the name in curfunc, as it could have already been renamed curname = idc.Name(ea) # Don't rename if the name is a special identifier, or if the ea has already been named # TODO: What's a better way to check for reserved name prefixes? if curname.startswith('sub_') and name.split('_')[0] not in set(['sub', 'loc', 'unk', 'dword', 'word', 'byte']): # Don't rename if the name already exists in the IDB if idc.LocByName(name) == idc.BADADDR: if idc.MakeName(ea, name): idc.SetFunctionFlags(ea, (idc.GetFunctionFlags(ea) | idc.FUNC_LIB)) #print "%s => %s" % (curname, name) return 1 #else: # print "WARNING: Attempted to rename '%s' => '%s', but '%s' already exists!" 
% (curname, name, name) return 0 def apply(self, extsigs): count = 0 start = time.time() # This applies formal matches first, then fuzzy matches for (match, fuzzy) in self.match(extsigs): # Keeps track of all function names that we've identified candidate functions for rename = {} for (curfunc, newfunc) in match.iteritems(): if not rename.has_key(newfunc.name): rename[newfunc.name] = [] # Attempt to rename this function rename[newfunc.name].append(curfunc.ea) bm = {} duplicates = set() # Search for unique matching code blocks inside this function for nblock in newfunc.blocks: nblock = RizzoBlockDescriptor(nblock) for cblock in curfunc.blocks: cblock = RizzoBlockDescriptor(cblock) if cblock.match(nblock, fuzzy): if bm.has_key(cblock): del bm[cblock] duplicates.add(cblock) elif cblock not in duplicates: bm[cblock] = nblock # Rename known function calls from each unique identified code block for (cblock, nblock) in bm.iteritems(): for n in range(0, len(cblock.functions)): ea = idc.LocByName(cblock.functions[n]) if ea != idc.BADADDR: if rename.has_key(nblock.functions[n]): rename[nblock.functions[n]].append(ea) else: rename[nblock.functions[n]] = [ea] # Rename the identified functions for (name, candidates) in rename.iteritems(): if candidates: winner = collections.Counter(candidates).most_common(1)[0][0] count += self.rename(winner, name) end = time.time() print "Renamed %d functions in %.2f seconds." % (count, (end-start)) def RizzoBuild(sigfile=None): print "Building Rizzo signatures, this may take a few minutes..." start = time.time() r = Rizzo(sigfile) r.save() end = time.time() print "Built signatures in %.2f seconds" % (end-start) def RizzoApply(sigfile=None): print "Applying Rizzo signatures, this may take a few minutes..." start = time.time() r = Rizzo(sigfile) s = r.load() r.apply(s) end = time.time() print "Signatures applied in %.2f seconds" % (end-start) class RizzoPlugin(idaapi.plugin_t): flags = 0 comment = "Function signature" help = "" wanted_name = "Rizzo" wanted_hotkey = "" NAME = "rizzo.py" def init(self): self.menu_context_load = idaapi.add_menu_item("File/Load file/", "Rizzo signature file...", "", 0, self.rizzo_load, (None,)) self.menu_context_produce = idaapi.add_menu_item("File/Produce file/", "Rizzo signature file...", "", 0, self.rizzo_produce, (True,)) return idaapi.PLUGIN_KEEP def term(self): idaapi.del_menu_item(self.menu_context_load) idaapi.del_menu_item(self.menu_context_produce) return None def run(self, arg): return None def rizzo_script(self): idaapi.IDAPython_ExecScript(self.script, globals()) def rizzo_produce(self, arg): fname = idc.AskFile(1, "*.riz", "Save signature file as") if fname: if '.' not in fname: fname += ".riz" RizzoBuild(fname) return None def rizzo_load(self, arg): fname = idc.AskFile(0, "*.riz", "Load signature file") if fname: RizzoApply(fname) return None def PLUGIN_ENTRY(): return RizzoPlugin()
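A usage sketch for the plugin above: besides the File-menu items, the two module-level helpers can be driven directly from IDA's Python console. The file names below are placeholders.

# From the IDAPython console (paths are placeholders):
# RizzoBuild("this_idb.riz")    # generate and save signatures for the open IDB
# RizzoApply("reference.riz")   # load a saved .riz and rename matched functions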
RobertoMalatesta/phantomjs
refs/heads/master
src/breakpad/src/tools/gyp/test/subdirectory/gyptest-subdir-default.py
137
#!/usr/bin/env python

# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies building a subsidiary dependent target from a .gyp file in a
subdirectory, without specifying an explicit output build directory, and
using the subdirectory's solution or project file as the entry point.
"""

import TestGyp
import errno

test = TestGyp.TestGyp()

test.run_gyp('prog1.gyp', chdir='src')

test.relocate('src', 'relocate/src')

chdir = 'relocate/src/subdir'

# Make can build sub-projects, but it's still through the top-level Makefile,
# and there is no 'default' or 'all' sub-project, so the target must be
# explicit.
# TODO(mmoss) Should make create self-contained, sub-project Makefiles,
# equivalent to the sub-project .sln/SConstruct/etc. files of other generators?
if test.format == 'make':
  chdir = 'relocate/src'
  test.build('prog2.gyp', 'prog2', chdir=chdir)
else:
  test.build('prog2.gyp', chdir=chdir)

test.built_file_must_not_exist('prog1', type=test.EXECUTABLE, chdir=chdir)

test.run_built_executable('prog2', chdir=chdir, stdout="Hello from prog2.c\n")

test.pass_test()
Jimmy-Morzaria/scikit-learn
refs/heads/master
sklearn/lda.py
15
""" Linear Discriminant Analysis (LDA) """ # Authors: Clemens Brunner # Martin Billinger # Matthieu Perrot # Mathieu Blondel # License: BSD 3-Clause from __future__ import print_function import warnings import numpy as np from scipy import linalg from .externals.six import string_types from .base import BaseEstimator, TransformerMixin from .linear_model.base import LinearClassifierMixin from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance from .utils.multiclass import unique_labels from .utils import check_array, check_X_y from .utils.validation import check_is_fitted from .utils.fixes import bincount from .preprocessing import StandardScaler __all__ = ['LDA'] def _cov(X, shrinkage=None): """Estimate covariance matrix (using optional shrinkage). Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. shrinkage : string or float, optional Shrinkage parameter, possible values: - None or 'empirical': no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Returns ------- s : array, shape (n_features, n_features) Estimated covariance matrix. """ shrinkage = "empirical" if shrinkage is None else shrinkage if isinstance(shrinkage, string_types): if shrinkage == 'auto': sc = StandardScaler() # standardize features X = sc.fit_transform(X) s = sc.std_ * ledoit_wolf(X)[0] * sc.std_ # scale back elif shrinkage == 'empirical': s = empirical_covariance(X) else: raise ValueError('unknown shrinkage parameter') elif isinstance(shrinkage, float) or isinstance(shrinkage, int): if shrinkage < 0 or shrinkage > 1: raise ValueError('shrinkage parameter must be between 0 and 1') s = shrunk_covariance(empirical_covariance(X), shrinkage) else: raise TypeError('shrinkage must be of string or int type') return s def _class_means(X, y): """Compute class means. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- means : array-like, shape (n_features,) Class means. """ means = [] classes = np.unique(y) for group in classes: Xg = X[y == group, :] means.append(Xg.mean(0)) return np.asarray(means) def _class_cov(X, y, priors=None, shrinkage=None): """Compute class covariance matrix. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. priors : array-like, shape (n_classes,) Class priors. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Returns ------- cov : array-like, shape (n_features, n_features) Class covariance matrix. """ classes = np.unique(y) covs = [] for group in classes: Xg = X[y == group, :] covs.append(np.atleast_2d(_cov(Xg, shrinkage))) return np.average(covs, axis=0, weights=priors) class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin): """Linear Discriminant Analysis (LDA). A classifier with a linear decision boundary, generated by fitting class conditional densities to the data and using Bayes' rule. The model fits a Gaussian density to each class, assuming that all classes share the same covariance matrix. The fitted model can also be used to reduce the dimensionality of the input by projecting it to the most discriminative directions. 
Parameters ---------- solver : string, optional Solver to use, possible values: - 'svd': Singular value decomposition (default). Does not compute the covariance matrix, therefore this solver is recommended for data with a large number of features. - 'lsqr': Least squares solution, can be combined with shrinkage. - 'eigen': Eigenvalue decomposition, can be combined with shrinkage. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Note that shrinkage works only with 'lsqr' and 'eigen' solvers. priors : array, optional, shape (n_classes,) Class priors. n_components : int, optional Number of components (< n_classes - 1) for dimensionality reduction. store_covariance : bool, optional Additionally compute class covariance matrix (default False). tol : float, optional Threshold used for rank estimation in SVD solver. Attributes ---------- coef_ : array, shape (n_features,) or (n_classes, n_features) Weight vector(s). intercept_ : array, shape (n_features,) Intercept term. covariance_ : array-like, shape (n_features, n_features) Covariance matrix (shared by all classes). means_ : array-like, shape (n_classes, n_features) Class means. priors_ : array-like, shape (n_classes,) Class priors (sum to 1). scalings_ : array-like, shape (rank, n_classes - 1) Scaling of the features in the space spanned by the class centroids. xbar_ : array-like, shape (n_features,) Overall mean. classes_ : array-like, shape (n_classes,) Unique class labels. See also -------- sklearn.qda.QDA: Quadratic discriminant analysis Notes ----- The default solver is 'svd'. It can perform both classification and transform, and it does not rely on the calculation of the covariance matrix. This can be an advantage in situations where the number of features is large. However, the 'svd' solver cannot be used with shrinkage. The 'lsqr' solver is an efficient algorithm that only works for classification. It supports shrinkage. The 'eigen' solver is based on the optimization of the between class scatter to within class scatter ratio. It can be used for both classification and transform, and it supports shrinkage. However, the 'eigen' solver needs to compute the covariance matrix, so it might not be suitable for situations with a high number of features. Examples -------- >>> import numpy as np >>> from sklearn.lda import LDA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> y = np.array([1, 1, 1, 2, 2, 2]) >>> clf = LDA() >>> clf.fit(X, y) LDA(n_components=None, priors=None, shrinkage=None, solver='svd', store_covariance=False, tol=0.0001) >>> print(clf.predict([[-0.8, -1]])) [1] """ def __init__(self, solver='svd', shrinkage=None, priors=None, n_components=None, store_covariance=False, tol=1e-4): self.solver = solver self.shrinkage = shrinkage self.priors = priors self.n_components = n_components self.store_covariance = store_covariance # used only in svd solver self.tol = tol # used only in svd solver def _solve_lsqr(self, X, y, shrinkage): """Least squares solver. The least squares solver computes a straightforward solution of the optimal decision rule based directly on the discriminant functions. It can only be used for classification (with optional shrinkage), because estimation of eigenvectors is not performed. Therefore, dimensionality reduction with the transform is not supported. 
Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_classes) Target values. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage parameter. Notes ----- This solver is based on [1]_, section 2.6.2, pp. 39-41. References ---------- .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN 0-471-05669-3. """ self.means_ = _class_means(X, y) self.covariance_ = _class_cov(X, y, self.priors_, shrinkage) self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)) def _solve_eigen(self, X, y, shrinkage): """Eigenvalue solver. The eigenvalue solver computes the optimal solution of the Rayleigh coefficient (basically the ratio of between class scatter to within class scatter). This solver supports both classification and dimensionality reduction (with optional shrinkage). Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. shrinkage : string or float, optional Shrinkage parameter, possible values: - None: no shrinkage (default). - 'auto': automatic shrinkage using the Ledoit-Wolf lemma. - float between 0 and 1: fixed shrinkage constant. Notes ----- This solver is based on [1]_, section 3.8.3, pp. 121-124. References ---------- .. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification (Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN 0-471-05669-3. """ self.means_ = _class_means(X, y) self.covariance_ = _class_cov(X, y, self.priors_, shrinkage) Sw = self.covariance_ # within scatter St = _cov(X, shrinkage) # total scatter Sb = St - Sw # between scatter evals, evecs = linalg.eigh(Sb, Sw) evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors # evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6 evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs) self.scalings_ = evecs self.coef_ = np.dot(self.means_, evecs).dot(evecs.T) self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) + np.log(self.priors_)) def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4): """SVD solver. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. store_covariance : bool, optional Additionally compute class covariance matrix (default False). tol : float, optional Threshold used for rank estimation. """ n_samples, n_features = X.shape n_classes = len(self.classes_) self.means_ = _class_means(X, y) if store_covariance: self.covariance_ = _class_cov(X, y, self.priors_) Xc = [] for idx, group in enumerate(self.classes_): Xg = X[y == group, :] Xc.append(Xg - self.means_[idx]) self.xbar_ = np.dot(self.priors_, self.means_) Xc = np.concatenate(Xc, axis=0) # 1) within (univariate) scaling by with classes std-dev std = Xc.std(axis=0) # avoid division by zero in normalization std[std == 0] = 1. fac = 1. 
/ (n_samples - n_classes) # 2) Within variance scaling X = np.sqrt(fac) * (Xc / std) # SVD of centered (within)scaled data U, S, V = linalg.svd(X, full_matrices=False) rank = np.sum(S > tol) if rank < n_features: warnings.warn("Variables are collinear.") # Scaling of within covariance is: V' 1/S scalings = (V[:rank] / std).T / S[:rank] # 3) Between variance scaling # Scale weighted centers X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) * (self.means_ - self.xbar_).T).T, scalings) # Centers are living in a space with n_classes-1 dim (maximum) # Use SVD to find projection in the space spanned by the # (n_classes) centers _, S, V = linalg.svd(X, full_matrices=0) rank = np.sum(S > tol * S[0]) self.scalings_ = np.dot(scalings, V.T[:, :rank]) coef = np.dot(self.means_ - self.xbar_, self.scalings_) self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) + np.log(self.priors_)) self.coef_ = np.dot(coef, self.scalings_.T) self.intercept_ -= np.dot(self.xbar_, self.coef_.T) def fit(self, X, y, store_covariance=False, tol=1.0e-4): """Fit LDA model according to the given training data and parameters. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array, shape (n_samples,) Target values. """ if store_covariance: warnings.warn("'store_covariance' was moved to the __init__()" "method in version 0.16 and will be removed from" "fit() in version 0.18.", DeprecationWarning) else: store_covariance = self.store_covariance if tol != 1.0e-4: warnings.warn("'tol' was moved to __init__() method in version" " 0.16 and will be removed from fit() in 0.18", DeprecationWarning) self.tol = tol X, y = check_X_y(X, y) self.classes_ = unique_labels(y) if self.priors is None: # estimate priors from sample _, y_t = np.unique(y, return_inverse=True) # non-negative ints self.priors_ = bincount(y_t) / float(len(y)) else: self.priors_ = self.priors if self.solver == 'svd': if self.shrinkage is not None: raise NotImplementedError('shrinkage not supported') self._solve_svd(X, y, store_covariance=store_covariance, tol=tol) elif self.solver == 'lsqr': self._solve_lsqr(X, y, shrinkage=self.shrinkage) elif self.solver == 'eigen': self._solve_eigen(X, y, shrinkage=self.shrinkage) else: raise ValueError("unknown solver {} (valid solvers are 'svd', " "'lsqr', and 'eigen').".format(self.solver)) if self.classes_.size == 2: # treat binary case as a special case self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2) self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0], ndmin=1) return self def transform(self, X): """Project data to maximize class separation. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- X_new : array, shape (n_samples, n_components) Transformed data. """ if self.solver == 'lsqr': raise NotImplementedError("transform not implemented for 'lsqr' " "solver (use 'svd' or 'eigen').") check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any) X = check_array(X) if self.solver == 'svd': X_new = np.dot(X - self.xbar_, self.scalings_) elif self.solver == 'eigen': X_new = np.dot(X, self.scalings_) n_components = X.shape[1] if self.n_components is None \ else self.n_components return X_new[:, :n_components] def predict_proba(self, X): """Estimate probability. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- C : array, shape (n_samples, n_classes) Estimated probabilities. 
""" prob = self.decision_function(X) prob *= -1 np.exp(prob, prob) prob += 1 np.reciprocal(prob, prob) if len(self.classes_) == 2: # binary case return np.column_stack([1 - prob, prob]) else: # OvR normalization, like LibLinear's predict_probability prob /= prob.sum(axis=1).reshape((prob.shape[0], -1)) return prob def predict_log_proba(self, X): """Estimate log probability. Parameters ---------- X : array-like, shape (n_samples, n_features) Input data. Returns ------- C : array, shape (n_samples, n_classes) Estimated log probabilities. """ return np.log(self.predict_proba(X))
mcking49/apache-flask
refs/heads/master
Python/Lib/site-packages/pylint/test/functional/fallback_import_disabled.py
12
# pylint: disable=missing-docstring,unused-import
try:
    import urllib2 as urllib_request #@
    import urllib2 as urllib_error
    from urlparse import urlparse
except ImportError:
    # python3
    from urllib import request as urllib_request
    from urllib import error as urllib_error
    from urllib.parse import urlparse
neerajvashistha/pa-dude
refs/heads/master
lib/python2.7/site-packages/django/contrib/sites/__init__.py
808
default_app_config = 'django.contrib.sites.apps.SitesConfig'
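For context, a trimmed sketch approximating the module the string above points at (the real SitesConfig also wires a post_migrate handler; only the registry-facing shape is shown):

# Approximate shape of django.contrib.sites.apps.SitesConfig (trimmed sketch):
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _

class SitesConfig(AppConfig):
    name = 'django.contrib.sites'
    verbose_name = _("Sites")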
lmaurits/phyltr
refs/heads/master
src/phyltr/__init__.py
1
from .__main__ import run_command, build_pipeline, COMMANDS # noqa: F401
cloudant/mango
refs/heads/master
test/01-index-crud-test.py
1
# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. import random import mango class IndexCrudTests(mango.DbPerClass): def test_bad_fields(self): bad_fields = [ None, True, False, "bing", 2.0, {"foo": "bar"}, [{"foo": 2}], [{"foo": "asc", "bar": "desc"}], [{"foo": "asc"}, {"bar": "desc"}] ] for fields in bad_fields: try: self.db.create_index(fields) except Exception, e: assert e.response.status_code == 400 else: raise AssertionError("bad create index") def test_bad_types(self): bad_types = [ None, True, False, 1.5, "foo", # Future support "geo", # Future support {"foo": "bar"}, ["baz", 3.0] ] for bt in bad_types: try: self.db.create_index(["foo"], idx_type=bt) except Exception, e: assert e.response.status_code == 400, (bt, e.response.status_code) else: raise AssertionError("bad create index") def test_bad_names(self): bad_names = [ True, False, 1.5, {"foo": "bar"}, [None, False] ] for bn in bad_names: try: self.db.create_index(["foo"], name=bn) except Exception, e: assert e.response.status_code == 400 else: raise AssertionError("bad create index") try: self.db.create_index(["foo"], ddoc=bn) except Exception, e: assert e.response.status_code == 400 else: raise AssertionError("bad create index") def test_create_idx_01(self): fields = ["foo", "bar"] ret = self.db.create_index(fields, name="idx_01") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "idx_01": continue assert idx["def"]["fields"] == [{"foo": "asc"}, {"bar": "asc"}] return raise AssertionError("index not created") def test_create_idx_01_exists(self): fields = ["foo", "bar"] ret = self.db.create_index(fields, name="idx_01") assert ret is False def test_create_idx_02(self): fields = ["baz", "foo"] ret = self.db.create_index(fields, name="idx_02") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "idx_02": continue assert idx["def"]["fields"] == [{"baz": "asc"}, {"foo": "asc"}] return raise AssertionError("index not created") def test_read_idx_doc(self): for idx in self.db.list_indexes(): if idx["type"] == "special": continue ddocid = idx["ddoc"] doc = self.db.open_doc(ddocid) assert doc["_id"] == ddocid info = self.db.ddoc_info(ddocid) assert info["name"] == ddocid def test_delete_idx_escaped(self): pre_indexes = self.db.list_indexes() ret = self.db.create_index(["bing"], name="idx_del_1") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "idx_del_1": continue assert idx["def"]["fields"] == [{"bing": "asc"}] self.db.delete_index(idx["ddoc"].replace("/", "%2F"), idx["name"]) post_indexes = self.db.list_indexes() assert pre_indexes == post_indexes def test_delete_idx_unescaped(self): pre_indexes = self.db.list_indexes() ret = self.db.create_index(["bing"], name="idx_del_2") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "idx_del_2": continue assert idx["def"]["fields"] == [{"bing": "asc"}] self.db.delete_index(idx["ddoc"], idx["name"]) post_indexes = self.db.list_indexes() assert pre_indexes == post_indexes def test_delete_idx_no_design(self): pre_indexes = 
self.db.list_indexes() ret = self.db.create_index(["bing"], name="idx_del_3") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "idx_del_3": continue assert idx["def"]["fields"] == [{"bing": "asc"}] self.db.delete_index(idx["ddoc"].split("/")[-1], idx["name"]) post_indexes = self.db.list_indexes() assert pre_indexes == post_indexes def test_bulk_delete(self): fields = ["field1"] ret = self.db.create_index(fields, name="idx_01") assert ret is True fields = ["field2"] ret = self.db.create_index(fields, name="idx_02") assert ret is True fields = ["field3"] ret = self.db.create_index(fields, name="idx_03") assert ret is True docids = [] for idx in self.db.list_indexes(): if idx["ddoc"] is not None: docids.append(idx["ddoc"]) docids.append("_design/this_is_not_an_index_name") ret = self.db.bulk_delete(docids) assert ret["error"][0]["id"] == "_design/this_is_not_an_index_name" assert len(ret["success"]) == 3 for idx in self.db.list_indexes(): assert idx["type"] != "json" assert idx["type"] != "text" def test_recreate_index(self): pre_indexes = self.db.list_indexes() for i in range(5): ret = self.db.create_index(["bing"], name="idx_recreate") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "idx_recreate": continue assert idx["def"]["fields"] == [{"bing": "asc"}] self.db.delete_index(idx["ddoc"], idx["name"]) break post_indexes = self.db.list_indexes() assert pre_indexes == post_indexes def test_delete_misisng(self): # Missing design doc try: self.db.delete_index("this_is_not_a_design_doc_id", "foo") except Exception, e: assert e.response.status_code == 404 else: raise AssertionError("bad index delete") # Missing view name indexes = self.db.list_indexes() not_special = [idx for idx in indexes if idx["type"] != "special"] idx = random.choice(not_special) ddocid = idx["ddoc"].split("/")[-1] try: self.db.delete_index(ddocid, "this_is_not_an_index_name") except Exception, e: assert e.response.status_code == 404 else: raise AssertionError("bad index delete") # Bad view type try: self.db.delete_index(ddocid, idx["name"], idx_type="not_a_real_type") except Exception, e: assert e.response.status_code == 404 else: raise AssertionError("bad index delete") def test_create_text_idx(self): fields = [ {"name":"stringidx", "type" : "string"}, {"name":"booleanidx", "type": "boolean"} ] ret = self.db.create_text_index(fields=fields, name="text_idx_01") assert ret is True for idx in self.db.list_indexes(): if idx["name"] != "text_idx_01": continue print idx["def"] assert idx["def"]["fields"] == [ {"stringidx": "string"}, {"booleanidx": "boolean"} ] return raise AssertionError("index not created") def test_create_bad_text_idx(self): bad_fields = [ True, False, "bing", 2.0, ["foo", "bar"], [{"name": "foo2"}], [{"name": "foo3", "type": "garbage"}], [{"type": "number"}], [{"name": "age", "type": "number"} , {"name": "bad"}], [{"name": "age", "type": "number"} , "bla"] ] for fields in bad_fields: try: self.db.create_text_index(fields=fields) except Exception, e: assert e.response.status_code == 400 else: raise AssertionError("bad create text index")
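A hedged sketch of the happy-path lifecycle the CRUD tests above exercise, written against the same harness (the db fixture comes from mango.DbPerClass; "idx_demo" is an illustrative name):

class IndexLifecycleSketch(mango.DbPerClass):
    def test_create_then_delete(self):
        assert self.db.create_index(["foo"], name="idx_demo") is True
        for idx in self.db.list_indexes():
            if idx["name"] == "idx_demo":
                self.db.delete_index(idx["ddoc"], idx["name"])
        assert all(i["name"] != "idx_demo" for i in self.db.list_indexes())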
venomJ/AndroidViewClient
refs/heads/master
examples/click-button-by-text.py
9
#! /usr/bin/env python ''' Copyright (C) 2012 Diego Torres Milano Created on May 5, 2012 @author: diego ''' import sys import os import time try: sys.path.append(os.path.join(os.environ['ANDROID_VIEW_CLIENT_HOME'], 'src')) except: pass from com.dtmilano.android.viewclient import ViewClient vc = ViewClient(*ViewClient.connectToDeviceOrExit()) for bt in [ 'One', 'Two', 'Three', 'Four', 'Five' ]: b = vc.findViewWithText(bt) if b: (x, y) = b.getXY() print >>sys.stderr, "clicking b%s @ (%d,%d) ..." % (bt, x, y) b.touch() else: print >>sys.stderr, "b%s not found" % bt time.sleep(7) print >>sys.stderr, "bye"
conorpp/napkis
refs/heads/master
napkis/deployment/python2.7/django/core/validators.py
107
from __future__ import unicode_literals import re try: from urllib.parse import urlsplit, urlunsplit except ImportError: # Python 2 from urlparse import urlsplit, urlunsplit from django.core.exceptions import ValidationError from django.utils.translation import ugettext_lazy as _ from django.utils.encoding import force_text from django.utils.ipv6 import is_valid_ipv6_address from django.utils import six # These values, if given to validate(), will trigger the self.required check. EMPTY_VALUES = (None, '', [], (), {}) class RegexValidator(object): regex = '' message = _('Enter a valid value.') code = 'invalid' def __init__(self, regex=None, message=None, code=None): if regex is not None: self.regex = regex if message is not None: self.message = message if code is not None: self.code = code # Compile the regex if it was not passed pre-compiled. if isinstance(self.regex, six.string_types): self.regex = re.compile(self.regex) def __call__(self, value): """ Validates that the input matches the regular expression. """ if not self.regex.search(force_text(value)): raise ValidationError(self.message, code=self.code) class URLValidator(RegexValidator): regex = re.compile( r'^(?:http|ftp)s?://' # http:// or https:// r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain... r'localhost|' # localhost... r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4 r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6 r'(?::\d+)?' # optional port r'(?:/?|[/?]\S+)$', re.IGNORECASE) def __call__(self, value): try: super(URLValidator, self).__call__(value) except ValidationError as e: # Trivial case failed. Try for possible IDN domain if value: value = force_text(value) scheme, netloc, path, query, fragment = urlsplit(value) try: netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super(URLValidator, self).__call__(url) else: raise else: url = value def validate_integer(value): try: int(value) except (ValueError, TypeError): raise ValidationError('') class EmailValidator(RegexValidator): def __call__(self, value): try: super(EmailValidator, self).__call__(value) except ValidationError as e: # Trivial case failed. 
Try for possible IDN domain-part if value and '@' in value: parts = value.split('@') try: parts[-1] = parts[-1].encode('idna').decode('ascii') except UnicodeError: raise e super(EmailValidator, self).__call__('@'.join(parts)) else: raise email_re = re.compile( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom # quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5 r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"' r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)$)' # domain r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3) validate_email = EmailValidator(email_re, _('Enter a valid email address.'), 'invalid') slug_re = re.compile(r'^[-a-zA-Z0-9_]+$') validate_slug = RegexValidator(slug_re, _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid') ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$') validate_ipv4_address = RegexValidator(ipv4_re, _('Enter a valid IPv4 address.'), 'invalid') def validate_ipv6_address(value): if not is_valid_ipv6_address(value): raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid') def validate_ipv46_address(value): try: validate_ipv4_address(value) except ValidationError: try: validate_ipv6_address(value) except ValidationError: raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid') ip_address_validator_map = { 'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')), 'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')), 'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')), } def ip_address_validators(protocol, unpack_ipv4): """ Depending on the given parameters returns the appropriate validators for the GenericIPAddressField. This code is here, because it is exactly the same for the model and the form field. """ if protocol != 'both' and unpack_ipv4: raise ValueError( "You can only use `unpack_ipv4` if `protocol` is set to 'both'") try: return ip_address_validator_map[protocol.lower()] except KeyError: raise ValueError("The protocol '%s' is unknown. 
Supported: %s" % (protocol, list(ip_address_validator_map))) comma_separated_int_list_re = re.compile('^[\d,]+$') validate_comma_separated_integer_list = RegexValidator(comma_separated_int_list_re, _('Enter only digits separated by commas.'), 'invalid') class BaseValidator(object): compare = lambda self, a, b: a is not b clean = lambda self, x: x message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).') code = 'limit_value' def __init__(self, limit_value): self.limit_value = limit_value def __call__(self, value): cleaned = self.clean(value) params = {'limit_value': self.limit_value, 'show_value': cleaned} if self.compare(cleaned, self.limit_value): raise ValidationError( self.message % params, code=self.code, params=params, ) class MaxValueValidator(BaseValidator): compare = lambda self, a, b: a > b message = _('Ensure this value is less than or equal to %(limit_value)s.') code = 'max_value' class MinValueValidator(BaseValidator): compare = lambda self, a, b: a < b message = _('Ensure this value is greater than or equal to %(limit_value)s.') code = 'min_value' class MinLengthValidator(BaseValidator): compare = lambda self, a, b: a < b clean = lambda self, x: len(x) message = _('Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).') code = 'min_length' class MaxLengthValidator(BaseValidator): compare = lambda self, a, b: a > b clean = lambda self, x: len(x) message = _('Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).') code = 'max_length'
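All of the validator classes above follow the same calling convention: construct one with a pattern or limit, then call the instance with a value and catch ValidationError. A minimal usage sketch (the hex pattern and the values are illustrative, not from the source):

from django.core.exceptions import ValidationError
from django.core.validators import MaxValueValidator, RegexValidator

# Build a custom validator from RegexValidator, exactly as validate_slug does above.
validate_hex = RegexValidator(r'^[0-9a-fA-F]+$', 'Enter a valid hex string.', 'invalid')

validate_hex('deadBEEF')       # matches, so it returns silently
try:
    MaxValueValidator(10)(42)  # compare(42, 10) is True, so this raises
except ValidationError as e:
    print(e.code)              # 'max_value'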
pyfirst/samplecode
refs/heads/master
4_scraping/requests-json.py
2
# Fetch the API response, which is in JSON format
import requests

r = requests.get('https://connpass.com/api/v1/event/?keyword=python')
data = r.json()  # get the decoded JSON data
for event in data['events']:
    print(event['title'])
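A slightly more defensive variant of the same request (assuming the same connpass endpoint) fails loudly on HTTP errors before decoding the body:

import requests

r = requests.get('https://connpass.com/api/v1/event/?keyword=python')
r.raise_for_status()  # raises requests.HTTPError on a 4xx/5xx response
for event in r.json()['events']:
    print(event['title'])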
hujiajie/chromium-crosswalk
refs/heads/master
chrome/common/extensions/docs/server2/document_parser_test.py
121
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import unittest from document_parser import ParseDocument, RemoveTitle _WHOLE_DOCUMENT = ''' Preamble before heading. <h1 id='main' class='header'>Main header</h1> Some intro to the content. <h2 id='banana' class='header' title=''>Bananas</h2> Something about bananas. <h2 id='orange' title='hello'>Oranges</h2> Something about oranges. <h3 id='valencia'>Valencia Oranges</h3> A description of valencia oranges. <h3 id='seville'>Seville Oranges</h3> A description of seville oranges. <h2>Grapefruit</h3> Grapefruit closed a h2 with a h3. This should be a warning. <h1 id='not-main'>Not the main header</h1> But it should still show up in the TOC as though it were an h2. <h2>Not <h3>a banana</h2> The embedded h3 should be ignored. <h4>It's a h4</h4> h4 are part of the document structure, but this is not inside a h3. <h3>Plantains</h3> Now I'm just getting lazy. <h4>Another h4</h4> This h4 is inside a h3 so will show up. <h5>Header 5</h5> Header 5s are not parsed. ''' _WHOLE_DOCUMENT_WITHOUT_TITLE = ''' Preamble before heading. Some intro to the content. <h2 id='banana' class='header' title=''>Bananas</h2> Something about bananas. <h2 id='orange' title='hello'>Oranges</h2> Something about oranges. <h3 id='valencia'>Valencia Oranges</h3> A description of valencia oranges. <h3 id='seville'>Seville Oranges</h3> A description of seville oranges. <h2>Grapefruit</h3> Grapefruit closed a h2 with a h3. This should be a warning. <h1 id='not-main'>Not the main header</h1> But it should still show up in the TOC as though it were an h2. <h2>Not <h3>a banana</h2> The embedded h3 should be ignored. <h4>It's a h4</h4> h4 are part of the document structure, but this is not inside a h3. <h3>Plantains</h3> Now I'm just getting lazy. <h4>Another h4</h4> This h4 is inside a h3 so will show up. <h5>Header 5</h5> Header 5s are not parsed. 
''' class DocumentParserUnittest(unittest.TestCase): def testEmptyDocument(self): self.assertEqual(('', 'No opening <h1> was found'), RemoveTitle('')) result = ParseDocument('') self.assertEqual(None, result.title) self.assertEqual(None, result.title_attributes) self.assertEqual([], result.sections) self.assertEqual([], result.warnings) result = ParseDocument('', expect_title=True) self.assertEqual('', result.title) self.assertEqual({}, result.title_attributes) self.assertEqual([], result.sections) self.assertEqual(['Expected a title'], result.warnings) def testRemoveTitle(self): no_closing_tag = '<h1>No closing tag' self.assertEqual((no_closing_tag, 'No closing </h1> was found'), RemoveTitle(no_closing_tag)) no_opening_tag = 'No opening tag</h1>' self.assertEqual((no_opening_tag, 'No opening <h1> was found'), RemoveTitle(no_opening_tag)) tags_wrong_order = '</h1>Tags in wrong order<h1>' self.assertEqual((tags_wrong_order, 'The </h1> appeared before the <h1>'), RemoveTitle(tags_wrong_order)) multiple_titles = '<h1>First header</h1> and <h1>Second header</h1>' self.assertEqual((' and <h1>Second header</h1>', None), RemoveTitle(multiple_titles)) upper_case = '<H1>Upper case header tag</H1> hi' self.assertEqual((' hi', None), RemoveTitle(upper_case)) mixed_case = '<H1>Mixed case header tag</h1> hi' self.assertEqual((' hi', None), RemoveTitle(mixed_case)) def testOnlyTitleDocument(self): document = '<h1 id="header">heading</h1>' self.assertEqual(('', None), RemoveTitle(document)) result = ParseDocument(document) self.assertEqual(None, result.title) self.assertEqual(None, result.title_attributes) self.assertEqual([], result.sections) self.assertEqual(['Found unexpected title "heading"'], result.warnings) result = ParseDocument(document, expect_title=True) self.assertEqual('heading', result.title) self.assertEqual({'id': 'header'}, result.title_attributes) self.assertEqual([], result.sections) self.assertEqual([], result.warnings) def testWholeDocument(self): self.assertEqual((_WHOLE_DOCUMENT_WITHOUT_TITLE, None), RemoveTitle(_WHOLE_DOCUMENT)) result = ParseDocument(_WHOLE_DOCUMENT, expect_title=True) self.assertEqual('Main header', result.title) self.assertEqual({'id': 'main', 'class': 'header'}, result.title_attributes) self.assertEqual([ 'Found closing </h3> while processing a <h2> (line 19, column 15)', 'Found multiple <h1> tags. Subsequent <h1> tags will be classified as ' '<h2> for the purpose of the structure (line 22, column 1)', 'Found <h3> in the middle of processing a <h2> (line 25, column 9)', # TODO(kalman): Re-enable this warning once the reference pages have # their references fixed. #'Found <h4> without any preceding <h3> (line 28, column 1)', ], result.warnings) # The non-trivial table of contents assertions... 
self.assertEqual(1, len(result.sections)) entries = result.sections[0].structure self.assertEqual(4, len(entries), entries) entry0, entry1, entry2, entry3 = entries self.assertEqual('hello', entry0.name) self.assertEqual({'id': 'orange'}, entry0.attributes) self.assertEqual(2, len(entry0.entries)) entry0_0, entry0_1 = entry0.entries self.assertEqual('Valencia Oranges', entry0_0.name) self.assertEqual({'id': 'valencia'}, entry0_0.attributes) self.assertEqual([], entry0_0.entries) self.assertEqual('Seville Oranges', entry0_1.name) self.assertEqual({'id': 'seville'}, entry0_1.attributes) self.assertEqual([], entry0_1.entries) self.assertEqual('Grapefruit', entry1.name) self.assertEqual({}, entry1.attributes) self.assertEqual([], entry1.entries) self.assertEqual('Not the main header', entry2.name) self.assertEqual({'id': 'not-main'}, entry2.attributes) self.assertEqual([], entry2.entries) self.assertEqual('Not a banana', entry3.name) self.assertEqual({}, entry3.attributes) self.assertEqual(2, len(entry3.entries)) entry3_1, entry3_2 = entry3.entries self.assertEqual('It\'s a h4', entry3_1.name) self.assertEqual({}, entry3_1.attributes) self.assertEqual([], entry3_1.entries) self.assertEqual('Plantains', entry3_2.name) self.assertEqual({}, entry3_2.attributes) self.assertEqual(1, len(entry3_2.entries)) entry3_2_1, = entry3_2.entries self.assertEqual('Another h4', entry3_2_1.name) self.assertEqual({}, entry3_2_1.attributes) self.assertEqual([], entry3_2_1.entries) def testSingleExplicitSection(self): def test(document): result = ParseDocument(document, expect_title=True) self.assertEqual([], result.warnings) self.assertEqual('Header', result.title) self.assertEqual(1, len(result.sections)) section0, = result.sections entry0, = section0.structure self.assertEqual('An inner header', entry0.name) # A single section, one with the title inside the section, the other out. test('<h1>Header</h1>' '<section>' 'Just a single section here.' '<h2>An inner header</h2>' '</section>') test('<section>' 'Another single section here.' '<h1>Header</h1>' '<h2>An inner header</h2>' '</section>') def testMultipleSections(self): result = ParseDocument( '<h1>Header</h1>' '<h2>First header</h2>' 'This content outside a section is the first section.' '<section>' 'Second section' '<h2>Second header</h2>' '</section>' '<section>' 'Third section' '<h2>Third header</h2>' '</section>', expect_title=True) self.assertEqual([], result.warnings) self.assertEqual('Header', result.title) self.assertEqual(3, len(result.sections)) section0, section1, section2 = result.sections def assert_single_header(section, name): self.assertEqual(1, len(section.structure)) self.assertEqual(name, section.structure[0].name) assert_single_header(section0, 'First header') assert_single_header(section1, 'Second header') assert_single_header(section2, 'Third header') if __name__ == '__main__': unittest.main()
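The tests above double as a usage reference for the parser. A minimal invocation, assuming document_parser is importable as it is in the test module:

from document_parser import ParseDocument

result = ParseDocument('<h1>Title</h1><h2 id="a">Section</h2>', expect_title=True)
print(result.title)  # expected: 'Title'
print([entry.name for entry in result.sections[0].structure])  # expected: ['Section']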
scs/uclinux
refs/heads/master
user/python/python-2.4.4/Lib/nntplib.py
156
"""An NNTP client class based on RFC 977: Network News Transfer Protocol. Example: >>> from nntplib import NNTP >>> s = NNTP('news') >>> resp, count, first, last, name = s.group('comp.lang.python') >>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last Group comp.lang.python has 51 articles, range 5770 to 5821 >>> resp, subs = s.xhdr('subject', first + '-' + last) >>> resp = s.quit() >>> Here 'resp' is the server response line. Error responses are turned into exceptions. To post an article from a file: >>> f = open(filename, 'r') # file containing article, including header >>> resp = s.post(f) >>> For descriptions of all methods, read the comments in the code below. Note that all arguments and return values representing article numbers are strings, not numbers, since they are rarely used for calculations. """ # RFC 977 by Brian Kantor and Phil Lapsley. # xover, xgtitle, xpath, date methods by Kevan Heydon # Imports import re import socket __all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError", "NNTPPermanentError","NNTPProtocolError","NNTPDataError", "error_reply","error_temp","error_perm","error_proto", "error_data",] # Exceptions raised when an error or invalid response is received class NNTPError(Exception): """Base class for all nntplib exceptions""" def __init__(self, *args): Exception.__init__(self, *args) try: self.response = args[0] except IndexError: self.response = 'No response given' class NNTPReplyError(NNTPError): """Unexpected [123]xx reply""" pass class NNTPTemporaryError(NNTPError): """4xx errors""" pass class NNTPPermanentError(NNTPError): """5xx errors""" pass class NNTPProtocolError(NNTPError): """Response does not begin with [1-5]""" pass class NNTPDataError(NNTPError): """Error in response data""" pass # for backwards compatibility error_reply = NNTPReplyError error_temp = NNTPTemporaryError error_perm = NNTPPermanentError error_proto = NNTPProtocolError error_data = NNTPDataError # Standard port used by NNTP servers NNTP_PORT = 119 # Response numbers that are followed by additional text (e.g. article) LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282'] # Line terminators (we always output CRLF, but accept any of CRLF, CR, LF) CRLF = '\r\n' # The class itself class NNTP: def __init__(self, host, port=NNTP_PORT, user=None, password=None, readermode=None, usenetrc=True): """Initialize an instance. Arguments: - host: hostname to connect to - port: port to connect to (default the standard NNTP port) - user: username to authenticate with - password: password to use with username - readermode: if true, send 'mode reader' command after connecting. readermode is sometimes necessary if you are connecting to an NNTP server on the local machine and intend to call reader-specific comamnds, such as `group'. If you get unexpected NNTPPermanentErrors, you might need to set readermode. """ self.host = host self.port = port self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((self.host, self.port)) self.file = self.sock.makefile('rb') self.debugging = 0 self.welcome = self.getresp() # 'mode reader' is sometimes necessary to enable 'reader' mode. # However, the order in which 'mode reader' and 'authinfo' need to # arrive differs between some NNTP servers. Try to send # 'mode reader', and if it fails with an authorization failed # error, try again after sending authinfo. 
readermode_afterauth = 0 if readermode: try: self.welcome = self.shortcmd('mode reader') except NNTPPermanentError: # error 500, probably 'not implemented' pass except NNTPTemporaryError, e: if user and e.response[:3] == '480': # Need authorization before 'mode reader' readermode_afterauth = 1 else: raise # If no login/password was specified, try to get them from ~/.netrc # Presume that if .netc has an entry, NNRP authentication is required. try: if usenetrc and not user: import netrc credentials = netrc.netrc() auth = credentials.authenticators(host) if auth: user = auth[0] password = auth[2] except IOError: pass # Perform NNRP authentication if needed. if user: resp = self.shortcmd('authinfo user '+user) if resp[:3] == '381': if not password: raise NNTPReplyError(resp) else: resp = self.shortcmd( 'authinfo pass '+password) if resp[:3] != '281': raise NNTPPermanentError(resp) if readermode_afterauth: try: self.welcome = self.shortcmd('mode reader') except NNTPPermanentError: # error 500, probably 'not implemented' pass # Get the welcome message from the server # (this is read and squirreled away by __init__()). # If the response code is 200, posting is allowed; # if it 201, posting is not allowed def getwelcome(self): """Get the welcome message from the server (this is read and squirreled away by __init__()). If the response code is 200, posting is allowed; if it 201, posting is not allowed.""" if self.debugging: print '*welcome*', repr(self.welcome) return self.welcome def set_debuglevel(self, level): """Set the debugging level. Argument 'level' means: 0: no debugging output (default) 1: print commands and responses but not body text etc. 2: also print raw lines read and sent before stripping CR/LF""" self.debugging = level debug = set_debuglevel def putline(self, line): """Internal: send one line to the server, appending CRLF.""" line = line + CRLF if self.debugging > 1: print '*put*', repr(line) self.sock.sendall(line) def putcmd(self, line): """Internal: send one command to the server (through putline()).""" if self.debugging: print '*cmd*', repr(line) self.putline(line) def getline(self): """Internal: return one line from the server, stripping CRLF. Raise EOFError if the connection is closed.""" line = self.file.readline() if self.debugging > 1: print '*get*', repr(line) if not line: raise EOFError if line[-2:] == CRLF: line = line[:-2] elif line[-1:] in CRLF: line = line[:-1] return line def getresp(self): """Internal: get a response from the server. Raise various errors if the response indicates an error.""" resp = self.getline() if self.debugging: print '*resp*', repr(resp) c = resp[:1] if c == '4': raise NNTPTemporaryError(resp) if c == '5': raise NNTPPermanentError(resp) if c not in '123': raise NNTPProtocolError(resp) return resp def getlongresp(self, file=None): """Internal: get a response plus following text from the server. 
Raise various errors if the response indicates an error.""" openedFile = None try: # If a string was passed then open a file with that name if isinstance(file, str): openedFile = file = open(file, "w") resp = self.getresp() if resp[:3] not in LONGRESP: raise NNTPReplyError(resp) list = [] while 1: line = self.getline() if line == '.': break if line[:2] == '..': line = line[1:] if file: file.write(line + "\n") else: list.append(line) finally: # If this method created the file, then it must close it if openedFile: openedFile.close() return resp, list def shortcmd(self, line): """Internal: send a command and get the response.""" self.putcmd(line) return self.getresp() def longcmd(self, line, file=None): """Internal: send a command and get the response plus following text.""" self.putcmd(line) return self.getlongresp(file) def newgroups(self, date, time, file=None): """Process a NEWGROUPS command. Arguments: - date: string 'yymmdd' indicating the date - time: string 'hhmmss' indicating the time Return: - resp: server response if successful - list: list of newsgroup names""" return self.longcmd('NEWGROUPS ' + date + ' ' + time, file) def newnews(self, group, date, time, file=None): """Process a NEWNEWS command. Arguments: - group: group name or '*' - date: string 'yymmdd' indicating the date - time: string 'hhmmss' indicating the time Return: - resp: server response if successful - list: list of message ids""" cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time return self.longcmd(cmd, file) def list(self, file=None): """Process a LIST command. Return: - resp: server response if successful - list: list of (group, last, first, flag) (strings)""" resp, list = self.longcmd('LIST', file) for i in range(len(list)): # Parse lines into "group last first flag" list[i] = tuple(list[i].split()) return resp, list def description(self, group): """Get a description for a single group. If more than one group matches ('group' is a pattern), return the first. If no group matches, return an empty string. This elides the response code from the server, since it can only be '215' or '285' (for xgtitle) anyway. If the response code is needed, use the 'descriptions' method. NOTE: This neither checks for a wildcard in 'group' nor does it check whether the group actually exists.""" resp, lines = self.descriptions(group) if len(lines) == 0: return "" else: return lines[0][1] def descriptions(self, group_pattern): """Get descriptions for a range of groups.""" line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$") # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern) if resp[:3] != "215": # Now the deprecated XGTITLE. This either raises an error # or succeeds with the same output structure as LIST # NEWSGROUPS. resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern) lines = [] for raw_line in raw_lines: match = line_pat.search(raw_line.strip()) if match: lines.append(match.group(1, 2)) return resp, lines def group(self, name): """Process a GROUP command. 
Argument: - group: the group name Returns: - resp: server response if successful - count: number of articles (string) - first: first article number (string) - last: last article number (string) - name: the group name""" resp = self.shortcmd('GROUP ' + name) if resp[:3] != '211': raise NNTPReplyError(resp) words = resp.split() count = first = last = 0 n = len(words) if n > 1: count = words[1] if n > 2: first = words[2] if n > 3: last = words[3] if n > 4: name = words[4].lower() return resp, count, first, last, name def help(self, file=None): """Process a HELP command. Returns: - resp: server response if successful - list: list of strings""" return self.longcmd('HELP',file) def statparse(self, resp): """Internal: parse the response of a STAT, NEXT or LAST command.""" if resp[:2] != '22': raise NNTPReplyError(resp) words = resp.split() nr = 0 id = '' n = len(words) if n > 1: nr = words[1] if n > 2: id = words[2] return resp, nr, id def statcmd(self, line): """Internal: process a STAT, NEXT or LAST command.""" resp = self.shortcmd(line) return self.statparse(resp) def stat(self, id): """Process a STAT command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: the article number - id: the message id""" return self.statcmd('STAT ' + id) def next(self): """Process a NEXT command. No arguments. Return as for STAT.""" return self.statcmd('NEXT') def last(self): """Process a LAST command. No arguments. Return as for STAT.""" return self.statcmd('LAST') def artcmd(self, line, file=None): """Internal: process a HEAD, BODY or ARTICLE command.""" resp, list = self.longcmd(line, file) resp, nr, id = self.statparse(resp) return resp, nr, id, list def head(self, id): """Process a HEAD command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's header""" return self.artcmd('HEAD ' + id) def body(self, id, file=None): """Process a BODY command. Argument: - id: article number or message id - file: Filename string or file object to store the article in Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article's body or an empty list if file was used""" return self.artcmd('BODY ' + id, file) def article(self, id): """Process an ARTICLE command. Argument: - id: article number or message id Returns: - resp: server response if successful - nr: article number - id: message id - list: the lines of the article""" return self.artcmd('ARTICLE ' + id) def slave(self): """Process a SLAVE command. Returns: - resp: server response if successful""" return self.shortcmd('SLAVE') def xhdr(self, hdr, str, file=None): """Process an XHDR command (optional server extension). Arguments: - hdr: the header type (e.g. 
'subject') - str: an article nr, a message id, or a range nr1-nr2 Returns: - resp: server response if successful - list: list of (nr, value) strings""" pat = re.compile('^([0-9]+) ?(.*)\n?') resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file) for i in range(len(lines)): line = lines[i] m = pat.match(line) if m: lines[i] = m.group(1, 2) return resp, lines def xover(self, start, end, file=None): """Process an XOVER command (optional server extension) Arguments: - start: start of range - end: end of range Returns: - resp: server response if successful - list: list of (art-nr, subject, poster, date, id, references, size, lines)""" resp, lines = self.longcmd('XOVER ' + start + '-' + end, file) xover_lines = [] for line in lines: elem = line.split("\t") try: xover_lines.append((elem[0], elem[1], elem[2], elem[3], elem[4], elem[5].split(), elem[6], elem[7])) except IndexError: raise NNTPDataError(line) return resp,xover_lines def xgtitle(self, group, file=None): """Process an XGTITLE command (optional server extension) Arguments: - group: group name wildcard (i.e. news.*) Returns: - resp: server response if successful - list: list of (name,title) strings""" line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$") resp, raw_lines = self.longcmd('XGTITLE ' + group, file) lines = [] for raw_line in raw_lines: match = line_pat.search(raw_line.strip()) if match: lines.append(match.group(1, 2)) return resp, lines def xpath(self,id): """Process an XPATH command (optional server extension) Arguments: - id: Message id of article Returns: resp: server response if successful path: directory path to article""" resp = self.shortcmd("XPATH " + id) if resp[:3] != '223': raise NNTPReplyError(resp) try: [resp_num, path] = resp.split() except ValueError: raise NNTPReplyError(resp) else: return resp, path def date (self): """Process the DATE command. Arguments: None Returns: resp: server response if successful date: Date suitable for newnews/newgroups commands etc. time: Time suitable for newnews/newgroups commands etc.""" resp = self.shortcmd("DATE") if resp[:3] != '111': raise NNTPReplyError(resp) elem = resp.split() if len(elem) != 2: raise NNTPDataError(resp) date = elem[1][2:8] time = elem[1][-6:] if len(date) != 6 or len(time) != 6: raise NNTPDataError(resp) return resp, date, time def post(self, f): """Process a POST command. Arguments: - f: file containing the article Returns: - resp: server response if successful""" resp = self.shortcmd('POST') # Raises error_??? if posting is not allowed if resp[0] != '3': raise NNTPReplyError(resp) while 1: line = f.readline() if not line: break if line[-1] == '\n': line = line[:-1] if line[:1] == '.': line = '.' + line self.putline(line) self.putline('.') return self.getresp() def ihave(self, id, f): """Process an IHAVE command. Arguments: - id: message-id of the article - f: file containing the article Returns: - resp: server response if successful Note that if the server refuses the article an exception is raised.""" resp = self.shortcmd('IHAVE ' + id) # Raises error_??? if the server already has it if resp[0] != '3': raise NNTPReplyError(resp) while 1: line = f.readline() if not line: break if line[-1] == '\n': line = line[:-1] if line[:1] == '.': line = '.' + line self.putline(line) self.putline('.') return self.getresp() def quit(self): """Process a QUIT command and close the socket. 
Returns: - resp: server response if successful""" resp = self.shortcmd('QUIT') self.file.close() self.sock.close() del self.file, self.sock return resp # Test retrieval when run as a script. # Assumption: if there's a local news server, it's called 'news'. # Assumption: if user queries a remote news server, it's named # in the environment variable NNTPSERVER (used by slrn and kin) # and we want readermode off. if __name__ == '__main__': import os newshost = 'news' and os.environ["NNTPSERVER"] if newshost.find('.') == -1: mode = 'readermode' else: mode = None s = NNTP(newshost, readermode=mode) resp, count, first, last, name = s.group('comp.lang.python') print resp print 'Group', name, 'has', count, 'articles, range', first, 'to', last resp, subs = s.xhdr('subject', first + '-' + last) print resp for item in subs: print "%7s %s" % item resp = s.quit() print resp
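Beyond the example in the module docstring, xover() is the usual way to pull summaries for a whole article range. A short sketch in the module's own Python 2 idiom (the server name 'news' is an assumption, as in the docstring):

from nntplib import NNTP

s = NNTP('news')
resp, count, first, last, name = s.group('comp.lang.python')
resp, overviews = s.xover(first, last)
for art_nr, subject, poster, date, msg_id, references, size, lines in overviews:
    print art_nr, subject
s.quit()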
sanyaade-teachings/oppia
refs/heads/master
extensions/objects/models/objects.py
15
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for interpreting typed objects in Oppia.""" __author__ = 'Sean Lip' import copy import os import feconf import schema_utils import utils class BaseObject(object): """Base object class. This is the superclass for typed object specifications in Oppia, such as SanitizedUrl and CoordTwoDim. Typed objects are initialized from a raw Python object which is expected to be derived from a JSON object. They are validated and normalized to basic Python objects (primitive types combined via lists and dicts; no sets or tuples). """ # These values should be overridden in subclasses. description = '' edit_html_filename = None edit_js_filename = None @classmethod def normalize(cls, raw): """Validates and normalizes a raw Python object. Returns: a normalized Python object describing the Object specified by this class. Raises: TypeError: if the Python object cannot be normalized. """ return schema_utils.normalize_against_schema(raw, cls.SCHEMA) @classmethod def has_editor_js_template(cls): return cls.edit_js_filename is not None @classmethod def get_editor_js_template(cls): if cls.edit_js_filename is None: raise Exception( 'There is no editor template defined for objects of type %s' % cls.__name__) return utils.get_file_contents(os.path.join( os.getcwd(), feconf.OBJECT_TEMPLATES_DIR, '%s.js' % cls.edit_js_filename)) @classmethod def get_editor_html_template(cls): if cls.edit_html_filename is None: raise Exception( 'There is no editor template defined for objects of type %s' % cls.__name__) return utils.get_file_contents(os.path.join( os.getcwd(), feconf.OBJECT_TEMPLATES_DIR, '%s.html' % cls.edit_html_filename)) class Null(BaseObject): """Class for a null object.""" description = 'A null object.' @classmethod def normalize(cls, raw): """Validates and normalizes a raw Python object.""" return None class Boolean(BaseObject): """Class for booleans.""" description = 'A boolean.' edit_html_filename = 'boolean_editor' edit_js_filename = 'BooleanEditor' SCHEMA = { 'type': 'bool' } @classmethod def normalize(cls, raw): """Validates and normalizes a raw Python object.""" if raw is None or raw == '': raw = False return schema_utils.normalize_against_schema(raw, cls.SCHEMA) class Real(BaseObject): """Real number class.""" description = 'A real number.' edit_html_filename = 'real_editor' edit_js_filename = 'RealEditor' SCHEMA = { 'type': 'float' } class Int(BaseObject): """Integer class.""" description = 'An integer.' edit_html_filename = 'int_editor' edit_js_filename = 'IntEditor' SCHEMA = { 'type': 'int' } class UnicodeString(BaseObject): """Unicode string class.""" description = 'A unicode string.' edit_html_filename = 'unicode_string_editor' edit_js_filename = 'UnicodeStringEditor' SCHEMA = { 'type': 'unicode', } class Html(BaseObject): """HTML string class.""" description = 'An HTML string.' 
edit_html_filename = 'html_editor' edit_js_filename = 'HtmlEditor' SCHEMA = { 'type': 'html', } class NonnegativeInt(BaseObject): """Nonnegative integer class.""" description = 'A non-negative integer.' edit_html_filename = 'nonnegative_int_editor' edit_js_filename = 'NonnegativeIntEditor' SCHEMA = { 'type': 'int', 'validators': [{ 'id': 'is_at_least', 'min_value': 0 }] } class CodeEvaluation(BaseObject): """Evaluation result of programming code.""" description = 'Code and its evaluation results.' SCHEMA = { 'type': 'dict', 'properties': [{ 'name': 'code', 'schema': UnicodeString.SCHEMA, }, { 'name': 'output', 'schema': UnicodeString.SCHEMA, }, { 'name': 'evaluation', 'schema': UnicodeString.SCHEMA, }, { 'name': 'error', 'schema': UnicodeString.SCHEMA, }] } class CoordTwoDim(BaseObject): """2D coordinate class.""" description = 'A two-dimensional coordinate (a pair of reals).' edit_html_filename = 'coord_two_dim_editor' edit_js_filename = 'CoordTwoDimEditor' SCHEMA = { 'type': 'list', 'len': 2, 'items': Real.SCHEMA, } class ListOfUnicodeString(BaseObject): """List class.""" description = 'A list.' edit_html_filename = 'list_editor' edit_js_filename = 'ListOfUnicodeStringEditor' SCHEMA = { 'type': 'list', 'items': UnicodeString.SCHEMA } class SetOfUnicodeString(BaseObject): """Class for sets of UnicodeStrings.""" description = 'A set (a list with unique elements) of unicode strings.' edit_html_filename = 'list_editor' edit_js_filename = 'SetOfUnicodeStringEditor' SCHEMA = { 'type': 'list', 'items': UnicodeString.SCHEMA, 'validators': [{ 'id': 'is_uniquified' }] } class NormalizedString(BaseObject): """Unicode string with spaces collapsed.""" description = 'A unicode string with adjacent whitespace collapsed.' edit_html_filename = 'unicode_string_editor' edit_js_filename = 'NormalizedStringEditor' SCHEMA = { 'type': 'unicode', 'post_normalizers': [{ 'id': 'normalize_spaces' }] } class MathLatexString(BaseObject): """Math LaTeX string class""" description = 'A LaTeX string.' edit_html_filename = 'math_latex_string_editor' edit_js_filename = 'MathLatexStringEditor' SCHEMA = UnicodeString.SCHEMA class SanitizedUrl(BaseObject): """HTTP or HTTPS url string class.""" description = 'An HTTP or HTTPS url.' edit_html_filename = 'unicode_string_editor' edit_js_filename = 'SanitizedUrlEditor' SCHEMA = { 'type': 'unicode', 'post_normalizers': [{ 'id': 'sanitize_url' }] } class MusicPhrase(BaseObject): """List of Objects that represent a musical phrase.""" description = ('A musical phrase that contains zero or more notes, rests, ' 'and time signature.') edit_html_filename = 'music_phrase_editor' edit_js_filename = 'MusicPhraseEditor' # The maximum number of notes allowed in a music phrase. _MAX_NOTES_IN_PHRASE = 8 _FRACTION_PART_SCHEMA = { 'type': 'int', 'validators': [{ 'id': 'is_at_least', 'min_value': 1 }] } SCHEMA = { 'type': 'list', 'items': { 'type': 'dict', 'properties': [{ 'name': 'readableNoteName', 'schema': { 'type': 'unicode', 'choices': [ 'C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5', 'D5', 'E5', 'F5', 'G5', 'A5' ] } }, { 'name': 'noteDuration', 'schema': { 'type': 'dict', 'properties': [{ 'name': 'num', 'schema': _FRACTION_PART_SCHEMA }, { 'name': 'den', 'schema': _FRACTION_PART_SCHEMA }] } }], }, 'validators': [{ 'id': 'has_length_at_most', 'max_value': _MAX_NOTES_IN_PHRASE, }] } class Filepath(BaseObject): """A string representing a filepath. The path will be prefixed with '[exploration_id]/assets'. 
""" description = 'A string that represents a filepath' edit_html_filename = 'filepath_editor' edit_js_filename = 'FilepathEditor' SCHEMA = UnicodeString.SCHEMA class CheckedProof(BaseObject): """A proof attempt and any errors it makes.""" description = 'A proof attempt and any errors it makes.' @classmethod def normalize(cls, raw): """Validates and normalizes a raw Python object.""" try: assert isinstance(raw, dict) assert isinstance(raw['assumptions_string'], basestring) assert isinstance(raw['target_string'], basestring) assert isinstance(raw['proof_string'], basestring) assert raw['correct'] in [True, False] if not raw['correct']: assert isinstance(raw['error_category'], basestring) assert isinstance(raw['error_code'], basestring) assert isinstance(raw['error_message'], basestring) assert isinstance(raw['error_line_number'], int) return copy.deepcopy(raw) except Exception: raise TypeError('Cannot convert to checked proof %s' % raw) class LogicQuestion(BaseObject): """A question giving a formula to prove""" description = 'A question giving a formula to prove.' edit_html_filename = 'logic_question_editor' edit_js_filename = 'LogicQuestionEditor' @classmethod def normalize(cls, raw): """Validates and normalizes a raw Python object.""" def _validateExpression(expression): assert isinstance(expression, dict) assert isinstance(expression['top_kind_name'], basestring) assert isinstance(expression['top_operator_name'], basestring) _validateExpressionArray(expression['arguments']) _validateExpressionArray(expression['dummies']) def _validateExpressionArray(array): assert isinstance(array, list) for item in array: _validateExpression(item) try: assert isinstance(raw, dict) _validateExpressionArray(raw['assumptions']) _validateExpressionArray(raw['results']) assert isinstance(raw['default_proof_string'], basestring) return copy.deepcopy(raw) except Exception: raise TypeError('Cannot convert to a logic question %s' % raw) class LogicErrorCategory(BaseObject): """A string from a list of possible categories""" description = 'One of the possible error categories of a logic proof.' edit_html_filename = 'logic_error_category_editor' edit_js_filename = 'LogicErrorCategoryEditor' SCHEMA = { 'type': 'unicode', 'choices': [ 'parsing', 'typing', 'line', 'layout', 'variables', 'logic', 'target', 'mistake' ] } class Graph(BaseObject): """A (mathematical) graph with edges and vertices""" description = 'A (mathematical) graph' edit_html_filename = 'graph_editor' edit_js_filename = 'GraphEditor' _VERTEX_SCHEMA = { 'type': 'dict', 'properties': [{ 'name': 'x', 'schema': Real.SCHEMA }, { 'name': 'y', 'schema': Real.SCHEMA }, { 'name': 'label', 'schema': UnicodeString.SCHEMA }] } _EDGE_SCHEMA = { 'type': 'dict', 'properties': [{ 'name': 'src', 'schema': Int.SCHEMA }, { 'name': 'dst', 'schema': Int.SCHEMA }, { 'name': 'weight', 'schema': Int.SCHEMA }] } SCHEMA = { 'type': 'dict', 'properties': [{ 'name': 'vertices', 'schema': { 'type': 'list', 'items': _VERTEX_SCHEMA } }, { 'name': 'edges', 'schema': { 'type': 'list', 'items': _EDGE_SCHEMA } }, { 'name': 'isLabeled', 'schema': Boolean.SCHEMA }, { 'name': 'isDirected', 'schema': Boolean.SCHEMA }, { 'name': 'isWeighted', 'schema': Boolean.SCHEMA }] } @classmethod def normalize(cls, raw): """Validates and normalizes a raw Python object.""" """ Checks that there are no self-loops or multiple edges. Checks that unlabeled graphs have all labels empty. Checks that unweighted graphs have all weights set to 1. TODO(czx): Think about support for multigraphs? 
""" try: raw = schema_utils.normalize_against_schema(raw, cls.SCHEMA) if not raw['isLabeled']: for vertex in raw['vertices']: assert (vertex['label'] == '') for edge in raw['edges']: assert (edge['src'] != edge['dst']) if not raw['isWeighted']: assert (edge['weight'] == 1.0) if raw['isDirected']: edge_pairs = [(edge['src'], edge['dst']) for edge in raw['edges']] else: edge_pairs = ( [(edge['src'], edge['dst']) for edge in raw['edges']] + [(edge['dst'], edge['src']) for edge in raw['edges']] ) assert len(set(edge_pairs)) == len(edge_pairs) except Exception: raise TypeError('Cannot convert to graph %s' % raw) return raw class NormalizedRectangle2D(BaseObject): """Normalized Rectangle class.""" description = 'A rectangle normalized so that the coordinates are within the range [0,1].' SCHEMA = { 'type': 'list', 'len': 2, 'items': { 'type': 'list', 'len': 2, 'items': Real.SCHEMA } } @classmethod def normalize(cls, raw): # Moves cur_value to the nearest available value in the range [min_value, max_value] def clamp(min_value, current_value, max_value): return min(max_value, max(min_value, current_value)) try: raw = schema_utils.normalize_against_schema(raw, cls.SCHEMA) raw[0][0] = clamp(0.0, raw[0][0], 1.0) raw[0][1] = clamp(0.0, raw[0][1], 1.0) raw[1][0] = clamp(0.0, raw[1][0], 1.0) raw[1][1] = clamp(0.0, raw[1][1], 1.0) except Exception: raise TypeError('Cannot convert to Normalized Rectangle %s' % raw) return raw class ImageRegion(BaseObject): """A region of an image, including its shape and coordinates.""" description = 'A region of an image.' # Note: at the moment, only supports rectangular image regions # Coordinates are [[top-left-x, top-left-y], [bottom-right-x, bottom-right-y]] # origin is top-left, increasing x is to the right, increasing y is down SCHEMA = { 'type': 'dict', 'properties': [{ 'name': 'regionType', 'schema': UnicodeString.SCHEMA }, { 'name': 'area', 'schema': NormalizedRectangle2D.SCHEMA }] } class ImageWithRegions(BaseObject): """An image overlaid with labeled regions.""" description = 'An image overlaid with regions.' edit_html_filename = 'image_with_regions_editor' edit_js_filename = 'ImageWithRegionsEditor' SCHEMA = { 'type': 'dict', 'properties': [{ 'name': 'imagePath', 'schema': Filepath.SCHEMA }, { 'name': 'labeledRegions', 'schema': { 'type': 'list', 'items': { 'type': 'dict', 'properties': [{ 'name': 'label', 'schema': UnicodeString.SCHEMA }, { 'name': 'region', 'schema': ImageRegion.SCHEMA }] } } }] } class ClickOnImage(BaseObject): """A click on an image and the clicked regions.""" description = "Position of a click and a list of regions clicked." SCHEMA = { 'type': 'dict', 'properties': [{ 'name': 'clickPosition', 'schema': { 'type': 'list', 'items': Real.SCHEMA, 'len': 2 } }, { 'name': 'clickedRegions', 'schema': { 'type': 'list', 'items': UnicodeString.SCHEMA } }] }
kingvuplus/enigma2
refs/heads/master
lib/python/Plugins/newplugin.py
37
#!/usr/bin/python import os os.system("clear") internalname = raw_input("Internal plugin name (no whitespaces, plugin directory): ") name = raw_input("Visible plugin name: ") print os.system("clear") dirlist = [] count = 0 print "Plugin categories:" for dir in os.listdir("."): if os.path.isdir(dir): count += 1 dirlist.append(dir) print count, dir category = raw_input("Select plugin category: ") category = dirlist[int(category) - 1] def add_where_extensionsmenu(name, fnc): description = raw_input("Plugin description: ") return 'PluginDescriptor(name = "%s", description = _("%s"), where = PluginDescriptor.WHERE_EXTENSIONSMENU, fnc = %s)' % (name, description, fnc) def add_where_pluginmenu(name, fnc): description = raw_input("Plugin description: ") icon = raw_input("Icon (default: 'plugin.png': ") if icon == "": icon = "plugin.png" return 'PluginDescriptor(name = "%s", description = _("%s"), icon = "%s", where = PluginDescriptor.WHERE_PLUGINMENU, fnc = %s)' % (name, description, icon, fnc) wherelist = [] wherelist.append(("WHERE_EXTENSIONSMENU", add_where_extensionsmenu)) wherelist.append(("WHERE_PLUGINMENU", add_where_pluginmenu)) targetlist = [] stop = False while not stop: os.system("clear") print "selected targets:" for where in targetlist: print where[0] print print "available targets:" count = 0 for where in wherelist: count += 1 print count, where[0] print "x break" target = raw_input("Select WHERE-target: ") if target == "x": stop = True else: if wherelist[int(target) - 1] not in targetlist: targetlist.append(wherelist[int(target) - 1]) else: targetlist.remove(wherelist[int(target) - 1]) pluginpath = category + "/" + internalname os.mkdir(pluginpath) makefile = open(category + "/Makefile.am", "r") lines = makefile.readlines() lines = ''.join(lines) lines = lines.strip() lines += " " + internalname makefile.close() makefile = open(category + "/Makefile.am", "w") makefile.write(lines) makefile.close() lines = [] print "open" configure = open("../../../configure.ac", "r") while True: line = configure.readline() if not line: break lines.append(line) if line.strip() == "lib/python/Plugins/" + category + "/Makefile": lines.append("lib/python/Plugins/" + pluginpath + "/Makefile\n") configure.close() print "close" configure = open("../../../configure.ac", "w") configure.writelines(lines) configure.close() file = open(pluginpath + "/plugin.py", "w") importlist = [] for where in targetlist: importlist.append(where[0]) file.write("""from Screens.Screen import Screen from Plugins.Plugin import PluginDescriptor """) mainlist = [] for count in range(len(targetlist)): if count == 0: mainlist.append("main") else: mainlist.append("main" + str(count)) for main in mainlist: file.write(""" def %s(session, **kwargs): pass """ % main) descriptorlist = [] for count in range(len(targetlist)): os.system("clear") where = targetlist[count] print "Options for target %s" % where[0] descriptorlist.append(where[1](name, mainlist[count])) if len(descriptorlist) == 1: descriptorlist = descriptorlist[0] else: descriptorlist = "[" + ', '.join(descriptorlist) + "]" file.write(""" def Plugins(**kwargs): return %s """ % descriptorlist) file.close() makefile = open(pluginpath + "/Makefile.am", "w") makefile.write("""installdir = $(pkglibdir)/python/Plugins/%s/%s install_PYTHON = \\ __init__.py \\ plugin.py """ % (category, internalname)) makefile.close()
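For reference, the plugin.py that this generator writes for a single WHERE_EXTENSIONSMENU target has the following shape (the name and description stand in for the interactive answers):

from Screens.Screen import Screen
from Plugins.Plugin import PluginDescriptor

def main(session, **kwargs):
    pass

def Plugins(**kwargs):
    return PluginDescriptor(name = "My Plugin", description = _("What my plugin does"), where = PluginDescriptor.WHERE_EXTENSIONSMENU, fnc = main)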
dsaraujo/circulante
refs/heads/master
django/core/files/storage.py
158
import os import errno import urlparse import itertools from datetime import datetime from django.conf import settings from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.core.files import locks, File from django.core.files.move import file_move_safe from django.utils.encoding import force_unicode, filepath_to_uri from django.utils.functional import LazyObject from django.utils.importlib import import_module from django.utils.text import get_valid_filename from django.utils._os import safe_join __all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage') class Storage(object): """ A base storage class, providing some default behaviors that all other storage systems can inherit or override, as necessary. """ # The following methods represent a public interface to private methods. # These shouldn't be overridden by subclasses unless absolutely necessary. def open(self, name, mode='rb', mixin=None): """ Retrieves the specified file from storage, using the optional mixin class to customize what features are available on the File returned. """ file = self._open(name, mode) if mixin: # Add the mixin as a parent class of the File returned from storage. file.__class__ = type(mixin.__name__, (mixin, file.__class__), {}) return file def save(self, name, content): """ Saves new content to the file specified by name. The content should be a proper File object, ready to be read from the beginning. """ # Get the proper name for the file, as it will actually be saved. if name is None: name = content.name name = self.get_available_name(name) name = self._save(name, content) # Store filenames with forward slashes, even on Windows return force_unicode(name.replace('\\', '/')) # These methods are part of the public API, with default implementations. def get_valid_name(self, name): """ Returns a filename, based on the provided filename, that's suitable for use in the target storage system. """ return get_valid_filename(name) def get_available_name(self, name): """ Returns a filename that's free on the target storage system, and available for new content to be written to. """ dir_name, file_name = os.path.split(name) file_root, file_ext = os.path.splitext(file_name) # If the filename already exists, add an underscore and a number (before # the file extension, if one exists) to the filename until the generated # filename doesn't exist. count = itertools.count(1) while self.exists(name): # file_ext includes the dot. name = os.path.join(dir_name, "%s_%s%s" % (file_root, count.next(), file_ext)) return name def path(self, name): """ Returns a local filesystem path where the file can be retrieved using Python's built-in open() function. Storage systems that can't be accessed using open() should *not* implement this method. """ raise NotImplementedError("This backend doesn't support absolute paths.") # The following methods form the public API for storage systems, but with # no default implementations. Subclasses must implement *all* of these. def delete(self, name): """ Deletes the specified file from the storage system. """ raise NotImplementedError() def exists(self, name): """ Returns True if a file referened by the given name already exists in the storage system, or False if the name is available for a new file. """ raise NotImplementedError() def listdir(self, path): """ Lists the contents of the specified path, returning a 2-tuple of lists; the first item being directories, the second item being files. 
""" raise NotImplementedError() def size(self, name): """ Returns the total size, in bytes, of the file specified by name. """ raise NotImplementedError() def url(self, name): """ Returns an absolute URL where the file's contents can be accessed directly by a Web browser. """ raise NotImplementedError() def accessed_time(self, name): """ Returns the last accessed time (as datetime object) of the file specified by name. """ raise NotImplementedError() def created_time(self, name): """ Returns the creation time (as datetime object) of the file specified by name. """ raise NotImplementedError() def modified_time(self, name): """ Returns the last modified time (as datetime object) of the file specified by name. """ raise NotImplementedError() class FileSystemStorage(Storage): """ Standard filesystem storage """ def __init__(self, location=None, base_url=None): if location is None: location = settings.MEDIA_ROOT if base_url is None: base_url = settings.MEDIA_URL self.location = os.path.abspath(location) self.base_url = base_url def _open(self, name, mode='rb'): return File(open(self.path(name), mode)) def _save(self, name, content): full_path = self.path(name) directory = os.path.dirname(full_path) if not os.path.exists(directory): os.makedirs(directory) elif not os.path.isdir(directory): raise IOError("%s exists and is not a directory." % directory) # There's a potential race condition between get_available_name and # saving the file; it's possible that two threads might return the # same name, at which point all sorts of fun happens. So we need to # try to create the file, but if it already exists we have to go back # to get_available_name() and try again. while True: try: # This file has a file path that we can move. if hasattr(content, 'temporary_file_path'): file_move_safe(content.temporary_file_path(), full_path) content.close() # This is a normal uploadedfile that we can stream. else: # This fun binary flag incantation makes os.open throw an # OSError if the file already exists before we open it. fd = os.open(full_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0)) try: locks.lock(fd, locks.LOCK_EX) for chunk in content.chunks(): os.write(fd, chunk) finally: locks.unlock(fd) os.close(fd) except OSError, e: if e.errno == errno.EEXIST: # Ooops, the file exists. We need a new file name. name = self.get_available_name(name) full_path = self.path(name) else: raise else: # OK, the file save worked. Break out of the loop. break if settings.FILE_UPLOAD_PERMISSIONS is not None: os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS) return name def delete(self, name): name = self.path(name) # If the file exists, delete it from the filesystem. if os.path.exists(name): os.remove(name) def exists(self, name): return os.path.exists(self.path(name)) def listdir(self, path): path = self.path(path) directories, files = [], [] for entry in os.listdir(path): if os.path.isdir(os.path.join(path, entry)): directories.append(entry) else: files.append(entry) return directories, files def path(self, name): try: path = safe_join(self.location, name) except ValueError: raise SuspiciousOperation("Attempted access to '%s' denied." 
% name) return os.path.normpath(path) def size(self, name): return os.path.getsize(self.path(name)) def url(self, name): if self.base_url is None: raise ValueError("This file is not accessible via a URL.") return urlparse.urljoin(self.base_url, filepath_to_uri(name)) def accessed_time(self, name): return datetime.fromtimestamp(os.path.getatime(self.path(name))) def created_time(self, name): return datetime.fromtimestamp(os.path.getctime(self.path(name))) def modified_time(self, name): return datetime.fromtimestamp(os.path.getmtime(self.path(name))) def get_storage_class(import_path=None): if import_path is None: import_path = settings.DEFAULT_FILE_STORAGE try: dot = import_path.rindex('.') except ValueError: raise ImproperlyConfigured("%s isn't a storage module." % import_path) module, classname = import_path[:dot], import_path[dot+1:] try: mod = import_module(module) except ImportError, e: raise ImproperlyConfigured('Error importing storage module %s: "%s"' % (module, e)) try: return getattr(mod, classname) except AttributeError: raise ImproperlyConfigured('Storage module "%s" does not define a "%s" class.' % (module, classname)) class DefaultStorage(LazyObject): def _setup(self): self._wrapped = get_storage_class()() default_storage = DefaultStorage()
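Tying the pieces together, the default_storage object defined above is used like this (a sketch; the file name and content are arbitrary, and the Python 2 print statements match the module):

from django.core.files.base import ContentFile
from django.core.files.storage import default_storage

name = default_storage.save('greeting.txt', ContentFile('Hello, storage.'))
print default_storage.exists(name), default_storage.size(name)
f = default_storage.open(name)
print f.read()
f.close()
default_storage.delete(name)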
jaywreddy/django
refs/heads/master
tests/forms_tests/tests/test_utils.py
130
# -*- coding: utf-8 -*- from __future__ import unicode_literals import copy from django.core.exceptions import ValidationError from django.forms.utils import ErrorDict, ErrorList, flatatt from django.test import SimpleTestCase from django.utils import six from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy class FormsUtilsTestCase(SimpleTestCase): # Tests for forms/utils.py module. def test_flatatt(self): ########### # flatatt # ########### self.assertEqual(flatatt({'id': "header"}), ' id="header"') self.assertEqual(flatatt({'class': "news", 'title': "Read this"}), ' class="news" title="Read this"') self.assertEqual( flatatt({'class': "news", 'title': "Read this", 'required': "required"}), ' class="news" required="required" title="Read this"' ) self.assertEqual( flatatt({'class': "news", 'title': "Read this", 'required': True}), ' class="news" title="Read this" required' ) self.assertEqual( flatatt({'class': "news", 'title': "Read this", 'required': False}), ' class="news" title="Read this"' ) self.assertEqual(flatatt({}), '') def test_flatatt_no_side_effects(self): """ Fixes #23883 -- Check that flatatt does not modify the dict passed in """ attrs = {'foo': 'bar', 'true': True, 'false': False} attrs_copy = copy.copy(attrs) self.assertEqual(attrs, attrs_copy) first_run = flatatt(attrs) self.assertEqual(attrs, attrs_copy) self.assertEqual(first_run, ' foo="bar" true') second_run = flatatt(attrs) self.assertEqual(attrs, attrs_copy) self.assertEqual(first_run, second_run) def test_validation_error(self): ################### # ValidationError # ################### # Can take a string. self.assertHTMLEqual(str(ErrorList(ValidationError("There was an error.").messages)), '<ul class="errorlist"><li>There was an error.</li></ul>') # Can take a unicode string. self.assertHTMLEqual(six.text_type(ErrorList(ValidationError("Not \u03C0.").messages)), '<ul class="errorlist"><li>Not π.</li></ul>') # Can take a lazy string. self.assertHTMLEqual(str(ErrorList(ValidationError(ugettext_lazy("Error.")).messages)), '<ul class="errorlist"><li>Error.</li></ul>') # Can take a list. self.assertHTMLEqual(str(ErrorList(ValidationError(["Error one.", "Error two."]).messages)), '<ul class="errorlist"><li>Error one.</li><li>Error two.</li></ul>') # Can take a dict. self.assertHTMLEqual( str(ErrorList(sorted(ValidationError({'error_1': "1. Error one.", 'error_2': "2. Error two."}).messages))), '<ul class="errorlist"><li>1. Error one.</li><li>2. Error two.</li></ul>' ) # Can take a mixture in a list. self.assertHTMLEqual( str(ErrorList(sorted(ValidationError([ "1. First error.", "2. Not \u03C0.", ugettext_lazy("3. Error."), { 'error_1': "4. First dict error.", 'error_2': "5. Second dict error.", }, ]).messages))), '<ul class="errorlist">' '<li>1. First error.</li>' '<li>2. Not π.</li>' '<li>3. Error.</li>' '<li>4. First dict error.</li>' '<li>5. Second dict error.</li>' '</ul>' ) @python_2_unicode_compatible class VeryBadError: def __str__(self): return "A very bad error." # Can take a non-string. self.assertHTMLEqual( str(ErrorList(ValidationError(VeryBadError()).messages)), '<ul class="errorlist"><li>A very bad error.</li></ul>' ) # Escapes non-safe input but not input marked safe. 
example = 'Example of link: <a href="http://www.example.com/">example</a>' self.assertHTMLEqual( str(ErrorList([example])), '<ul class="errorlist"><li>Example of link: ' '&lt;a href=&quot;http://www.example.com/&quot;&gt;example&lt;/a&gt;</li></ul>' ) self.assertHTMLEqual( str(ErrorList([mark_safe(example)])), '<ul class="errorlist"><li>Example of link: ' '<a href="http://www.example.com/">example</a></li></ul>' ) self.assertHTMLEqual( str(ErrorDict({'name': example})), '<ul class="errorlist"><li>nameExample of link: ' '&lt;a href=&quot;http://www.example.com/&quot;&gt;example&lt;/a&gt;</li></ul>' ) self.assertHTMLEqual( str(ErrorDict({'name': mark_safe(example)})), '<ul class="errorlist"><li>nameExample of link: ' '<a href="http://www.example.com/">example</a></li></ul>' ) def test_error_dict_copy(self): e = ErrorDict() e['__all__'] = ErrorList([ ValidationError( message='message %(i)s', params={'i': 1}, ), ValidationError( message='message %(i)s', params={'i': 2}, ), ]) e_copy = copy.copy(e) self.assertEqual(e, e_copy) self.assertEqual(e.as_data(), e_copy.as_data()) e_deepcopy = copy.deepcopy(e) self.assertEqual(e, e_deepcopy) self.assertEqual(e.as_data(), e_copy.as_data()) def test_error_dict_html_safe(self): e = ErrorDict() e['username'] = 'Invalid username.' self.assertTrue(hasattr(ErrorDict, '__html__')) self.assertEqual(force_text(e), e.__html__()) def test_error_list_html_safe(self): e = ErrorList(['Invalid username.']) self.assertTrue(hasattr(ErrorList, '__html__')) self.assertEqual(force_text(e), e.__html__())
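Reduced to its essentials, the behaviour these tests pin down looks like this (a sketch; the attribute dict and message are arbitrary):

from django.forms.utils import ErrorList, flatatt

print(flatatt({'class': 'news', 'required': True}))  # ' class="news" required'
print(str(ErrorList(['Invalid username.'])))
# '<ul class="errorlist"><li>Invalid username.</li></ul>'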
pannarale/pycbc
refs/heads/master
examples/gw150914/gw150914_h1_snr.py
9
from pycbc.frame import read_frame
from pycbc.filter import highpass_fir, matched_filter
from pycbc.waveform import get_fd_waveform
from pycbc.psd import welch, interpolate

try:
    from urllib.request import urlretrieve
except ImportError:  # python < 3
    from urllib import urlretrieve

# Read data and remove low frequency content
fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf'
url = "https://www.gw-openscience.org/GW150914data/" + fname
urlretrieve(url, filename=fname)
h1 = read_frame('H-H1_LOSC_4_V2-1126259446-32.gwf', 'H1:LOSC-STRAIN')
h1 = highpass_fir(h1, 15, 8)

# Calculate the noise spectrum
psd = interpolate(welch(h1), 1.0 / h1.duration)

# Generate a template to filter with
hp, hc = get_fd_waveform(approximant="IMRPhenomD", mass1=40, mass2=32,
                         f_lower=20, delta_f=1.0/h1.duration)
hp.resize(len(h1) // 2 + 1)

# Calculate the complex (two-phase) SNR
snr = matched_filter(hp, h1, psd=psd, low_frequency_cutoff=20.0)

# Remove regions corrupted by filter wraparound
snr = snr[len(snr) // 4: len(snr) * 3 // 4]

import pylab
pylab.plot(snr.sample_times, abs(snr))
pylab.ylabel('signal-to-noise')
pylab.xlabel('GPS Time (s)')
pylab.show()
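A common follow-up to this script is to read off the loudest point of the series; this sketch reuses the snr object from above (peak-finding is plain numpy argmax on the magnitude):

peak = abs(snr).numpy().argmax()
snrp = snr[peak]
print('Peak |SNR| of %.2f at GPS time %.4f s' % (abs(snrp), snr.sample_times[peak]))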
YathishReddy/Robust_ECN_Signalling_With_Nonces
refs/heads/master
src/mpi/bindings/modulegen__gcc_LP64.py
28
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.mpi', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## address.h (module 'network'): ns3::Address [class] module.add_class('Address', import_from_module='ns.network') ## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration] module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) ## buffer.h (module 'network'): ns3::Buffer [class] module.add_class('Buffer', import_from_module='ns.network') ## buffer.h (module 'network'): ns3::Buffer::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer']) ## packet.h (module 'network'): ns3::ByteTagIterator [class] module.add_class('ByteTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::ByteTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList [class] module.add_class('ByteTagList', import_from_module='ns.network') ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class] module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList']) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator']) ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] module.add_class('Ipv4Address', import_from_module='ns.network') ## ipv4-address.h (module 'network'): ns3::Ipv4Address [class] root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class] module.add_class('Ipv4Mask', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] module.add_class('Ipv6Address', import_from_module='ns.network') ## ipv6-address.h (module 'network'): ns3::Ipv6Address [class] root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address']) ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class] module.add_class('Ipv6Prefix', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] module.add_class('Mac48Address', import_from_module='ns.network') ## mac48-address.h (module 'network'): ns3::Mac48Address [class] 
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address']) ## mpi-interface.h (module 'mpi'): ns3::MpiInterface [class] module.add_class('MpiInterface') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## packet-metadata.h (module 'network'): ns3::PacketMetadata [class] module.add_class('PacketMetadata', import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration] module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network') ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class] module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata']) ## packet.h (module 'network'): ns3::PacketTagIterator [class] module.add_class('PacketTagIterator', import_from_module='ns.network') ## packet.h (module 'network'): ns3::PacketTagIterator::Item [class] module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator']) ## packet-tag-list.h (module 'network'): ns3::PacketTagList [class] module.add_class('PacketTagList', import_from_module='ns.network') ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct] module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList']) ## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface [class] module.add_class('ParallelCommunicationInterface', allow_subclassing=True) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## tag.h (module 'network'): ns3::Tag [class] module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## tag-buffer.h (module 'network'): ns3::TagBuffer [class] module.add_class('TagBuffer', import_from_module='ns.network') ## nstime.h (module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration] module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', 
outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## chunk.h (module 'network'): ns3::Chunk [class] module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase']) ## header.h (module 'network'): ns3::Header [class] module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, 
ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount')) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time [class] root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## trailer.h (module 'network'): ns3::Trailer [class] module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, 
import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class] module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor']) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class] module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class] module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class] module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class] module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class] module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class] module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class] module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class] module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class] module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class] module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class] module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver [class] module.add_class('MpiReceiver', parent=root_module['ns3::Object']) ## nix-vector.h (module 'network'): ns3::NixVector [class] module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) ## packet.h (module 'network'): ns3::Packet [class] module.add_class('Packet', 
import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## address.h (module 'network'): ns3::AddressChecker [class] module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker']) ## address.h (module 'network'): ns3::AddressValue [class] module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue']) ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) ## Register a nested module for the namespace TracedValueCallback nested_module = module.add_cpp_namespace('TracedValueCallback') register_types_ns3_TracedValueCallback(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_types_ns3_TracedValueCallback(module): root_module = module.get_root() 
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*') typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&') def register_methods(root_module): register_Ns3Address_methods(root_module, root_module['ns3::Address']) register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer']) register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator']) register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator']) register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item']) register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList']) register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator']) register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address']) register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask']) register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address']) register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix']) register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address']) register_Ns3MpiInterface_methods(root_module, root_module['ns3::MpiInterface']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata']) register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item']) register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator']) register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator']) register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item']) register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList']) register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData']) register_Ns3ParallelCommunicationInterface_methods(root_module, root_module['ns3::ParallelCommunicationInterface']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3Tag_methods(root_module, root_module['ns3::Tag']) register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer']) register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) register_Ns3Empty_methods(root_module, 
root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk']) register_Ns3Header_methods(root_module, root_module['ns3::Header']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >']) register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor']) register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker']) register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue']) 
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker']) register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue']) register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker']) register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue']) register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker']) register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue']) register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker']) register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue']) register_Ns3MpiReceiver_methods(root_module, root_module['ns3::MpiReceiver']) register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector']) register_Ns3Packet_methods(root_module, root_module['ns3::Packet']) register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker']) register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue']) register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation']) register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a']) register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32']) register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64']) register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3']) return def register_Ns3Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## address.h (module 'network'): ns3::Address::Address() [constructor] cls.add_constructor([]) ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor] cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor] cls.add_constructor([param('ns3::Address const &', 'address')]) ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function] cls.add_method('CheckCompatible', 'bool', [param('uint8_t', 'type'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyAllFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function] cls.add_method('CopyAllTo', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function] cls.add_method('CopyFrom', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')]) ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function] cls.add_method('CopyTo', 
'uint32_t', [param('uint8_t *', 'buffer')], is_const=True) ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function] cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'buffer')]) ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function] cls.add_method('GetLength', 'uint8_t', [], is_const=True) ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function] cls.add_method('IsInvalid', 'bool', [], is_const=True) ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function] cls.add_method('IsMatchingType', 'bool', [param('uint8_t', 'type')], is_const=True) ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function] cls.add_method('Register', 'uint8_t', [], is_static=True) ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function] cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'buffer')], is_const=True) return def register_Ns3AttributeConstructionList_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function] cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')]) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function] cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function] cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True) ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function] cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True) return def register_Ns3AttributeConstructionListItem_methods(root_module, cls): ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor] cls.add_constructor([]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')]) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker 
[variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable] cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False) return def register_Ns3Buffer_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor] cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')]) ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor] cls.add_constructor([param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function] cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function] cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')]) ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function] cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function] cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function] cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function] cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True) ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function] cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function] cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function] cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function] cls.add_method('PeekData', 'uint8_t const *', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function] cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t 
start) [member function] cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function] cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True) return def register_Ns3BufferIterator_methods(root_module, cls): ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')]) ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor] cls.add_constructor([]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function] cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')]) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function] cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function] cls.add_method('GetRemainingSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function] cls.add_method('GetSize', 'uint32_t', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function] cls.add_method('IsEnd', 'bool', [], is_const=True) ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function] cls.add_method('IsStart', 'bool', [], is_const=True) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function] cls.add_method('Next', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function] cls.add_method('Next', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function] cls.add_method('PeekU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function] cls.add_method('Prev', 'void', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function] cls.add_method('Prev', 'void', [param('uint32_t', 'delta')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function] cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function] cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function] cls.add_method('ReadLsbtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function] cls.add_method('ReadLsbtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function] 
cls.add_method('ReadLsbtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function] cls.add_method('ReadNtohU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function] cls.add_method('ReadNtohU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function] cls.add_method('ReadNtohU64', 'uint64_t', []) ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function] cls.add_method('ReadU16', 'uint16_t', []) ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function] cls.add_method('ReadU32', 'uint32_t', []) ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function] cls.add_method('ReadU64', 'uint64_t', []) ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function] cls.add_method('ReadU8', 'uint8_t', []) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function] cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function] cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function] cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function] cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function] cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function] cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function] cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function] cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function] cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function] cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function] cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')]) ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function] cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')]) return def register_Ns3ByteTagIterator_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator 
const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')]) ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagIterator::Item', []) return def register_Ns3ByteTagIteratorItem_methods(root_module, cls): ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')]) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function] cls.add_method('GetEnd', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function] cls.add_method('GetStart', 'uint32_t', [], is_const=True) ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function] cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True) ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True) return def register_Ns3ByteTagList_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor] cls.add_constructor([]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor] cls.add_constructor([param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function] cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function] cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function] cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function] cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')]) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function] cls.add_method('Adjust', 'void', [param('int32_t', 'adjustment')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function] cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True) ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function] cls.add_method('RemoveAll', 'void', []) return def register_Ns3ByteTagListIterator_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')]) ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const 
[member function] cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True) ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function] cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', []) return def register_Ns3ByteTagListIteratorItem_methods(root_module, cls): ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor] cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor] cls.add_constructor([param('ns3::TagBuffer', 'buf')]) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable] cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable] cls.add_instance_attribute('end', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable] cls.add_instance_attribute('size', 'uint32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable] cls.add_instance_attribute('start', 'int32_t', is_const=False) ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable] cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False) return def register_Ns3CallbackBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function] cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True) ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected') return def register_Ns3Hasher_methods(root_module, cls): ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hasher const &', 'arg0')]) ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor] cls.add_constructor([]) ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor] cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function] cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')]) ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')]) ## hash.h (module 'core'): uint64_t 
ns3::Hasher::GetHash64(std::string const s) [member function] cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')]) ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function] cls.add_method('clear', 'ns3::Hasher &', []) return def register_Ns3Ipv4Address_methods(root_module, cls): cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor] cls.add_constructor([param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor] cls.add_constructor([param('char const *', 'address')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function] cls.add_method('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function] cls.add_method('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], is_static=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function] cls.add_method('GetAny', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function] cls.add_method('GetBroadcast', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Address', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function] cls.add_method('IsAny', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function] cls.add_method('IsBroadcast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const 
[member function] cls.add_method('IsLocalMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function] cls.add_method('IsLocalhost', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function] cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function] cls.add_method('IsMulticast', 'bool', [], is_const=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function] cls.add_method('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function] cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function] cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function] cls.add_method('Set', 'void', [param('uint32_t', 'address')]) ## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function] cls.add_method('Set', 'void', [param('char const *', 'address')]) return def register_Ns3Ipv4Mask_methods(root_module, cls): cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor] cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor] cls.add_constructor([]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor] cls.add_constructor([param('uint32_t', 'mask')]) ## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor] cls.add_constructor([param('char const *', 'mask')]) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function] cls.add_method('Get', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function] cls.add_method('GetInverse', 'uint32_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function] cls.add_method('GetLoopback', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function] cls.add_method('GetOnes', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function] cls.add_method('GetPrefixLength', 'uint16_t', [], is_const=True) ## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function] cls.add_method('GetZero', 'ns3::Ipv4Mask', [], is_static=True) ## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], is_const=True) ## ipv4-address.h (module 'network'): bool 
def register_Ns3Ipv6Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
    cls.add_constructor([param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
    cls.add_constructor([param('uint8_t *', 'address')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
    cls.add_method('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
    cls.add_method('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
    cls.add_method('GetAllHostsMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
    cls.add_method('GetAllNodesMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
    cls.add_method('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
    cls.add_method('GetAny', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
    cls.add_method('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Address', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
    cls.add_method('IsAllHostsMulticast', 'bool', [], deprecated=True, is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
    cls.add_method('IsAllNodesMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
    cls.add_method('IsAllRoutersMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
    cls.add_method('IsAny', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
    cls.add_method('IsDocumentation', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
    cls.add_method('IsIpv4MappedAddress', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
    cls.add_method('IsLinkLocal', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
    cls.add_method('IsLinkLocalMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
    cls.add_method('IsLocalhost', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
    cls.add_method('IsSolicitedMulticast', 'bool', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
    cls.add_method('MakeAutoconfiguredAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac16Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac48Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
    cls.add_method('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address', [param('ns3::Mac64Address', 'mac')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
    cls.add_method('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
    cls.add_method('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], is_static=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
    cls.add_method('Serialize', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
    cls.add_method('Set', 'void', [param('char const *', 'address')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
    cls.add_method('Set', 'void', [param('uint8_t *', 'address')])
    return
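
# Illustrative sketch, not part of the generated bindings: the
# MakeAutoconfiguredLinkLocalAddress() overloads wrapped above derive an
# IPv6 link-local address from a MAC address (module names assumed):
#
#   import ns.network
#   mac = ns.network.Mac48Address("00:00:00:00:00:01")
#   lla = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
#   assert lla.IsLinkLocal()    # fe80::/64-derived address
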
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
    cls.add_constructor([param('uint8_t *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
    cls.add_constructor([param('char const *', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
    cls.add_constructor([param('uint8_t', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
    cls.add_method('GetBytes', 'void', [param('uint8_t *', 'buf')], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
    cls.add_method('GetLoopback', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
    cls.add_method('GetOnes', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
    cls.add_method('GetPrefixLength', 'uint8_t', [], is_const=True)
    ## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
    cls.add_method('GetZero', 'ns3::Ipv6Prefix', [], is_static=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], is_const=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
    cls.add_method('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], is_const=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    return

def register_Ns3Mac48Address_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
    cls.add_constructor([param('char const *', 'str')])
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
    cls.add_method('Allocate', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
    cls.add_method('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
    cls.add_method('CopyFrom', 'void', [param('uint8_t const *', 'buffer')])
    ## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo', 'void', [param('uint8_t *', 'buffer')], is_const=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
    cls.add_method('GetBroadcast', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
    cls.add_method('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
    cls.add_method('GetMulticast6Prefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
    cls.add_method('GetMulticastPrefix', 'ns3::Mac48Address', [], is_static=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
    cls.add_method('IsGroup', 'bool', [], is_const=True)
    ## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
    cls.add_method('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], is_static=True)
    return

def register_Ns3MpiInterface_methods(root_module, cls):
    ## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface() [constructor]
    cls.add_constructor([])
    ## mpi-interface.h (module 'mpi'): ns3::MpiInterface::MpiInterface(ns3::MpiInterface const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::MpiInterface const &', 'arg0')])
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Destroy() [member function]
    cls.add_method('Destroy', 'void', [], is_static=True)
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Disable() [member function]
    cls.add_method('Disable', 'void', [], is_static=True)
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::Enable(int * pargc, char * * * pargv) [member function]
    cls.add_method('Enable', 'void', [param('int *', 'pargc'), param('char * * *', 'pargv')], is_static=True)
    ## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSize() [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_static=True)
    ## mpi-interface.h (module 'mpi'): static uint32_t ns3::MpiInterface::GetSystemId() [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_static=True)
    ## mpi-interface.h (module 'mpi'): static bool ns3::MpiInterface::IsEnabled() [member function]
    cls.add_method('IsEnabled', 'bool', [], is_static=True)
    ## mpi-interface.h (module 'mpi'): static void ns3::MpiInterface::SendPacket(ns3::Ptr<ns3::Packet> p, ns3::Time const & rxTime, uint32_t node, uint32_t dev) [member function]
    cls.add_method('SendPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Time const &', 'rxTime'), param('uint32_t', 'node'), param('uint32_t', 'dev')], is_static=True)
    return
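
# Illustrative sketch, not part of the generated bindings: MpiInterface is
# the static entry point of this 'mpi' module for distributed simulation.
# Because Enable() takes raw argc/argv pointers, the canonical flow is the
# C++ one (shown here for orientation only):
#
#   ns3::MpiInterface::Enable(&argc, &argv);
#   uint32_t rank = ns3::MpiInterface::GetSystemId();  // this rank's id
#   uint32_t size = ns3::MpiInterface::GetSize();      // number of ranks
#   ...
#   ns3::MpiInterface::Disable();
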
def register_Ns3ObjectBase_methods(root_module, cls):
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], visibility='protected', is_virtual=True)
    return
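
# Illustrative sketch, not part of the generated bindings: ObjectBase carries
# ns-3's attribute system, so every wrapped object supports name-based
# get/set. Hypothetical usage (the attribute name depends on the concrete
# TypeId; 'obj' stands for any wrapped ns-3 object):
#
#   import ns.core
#   value = ns.core.StringValue()
#   obj.GetAttribute("SomeAttribute", value)                  # filled in place
#   obj.SetAttribute("SomeAttribute", ns.core.StringValue("42"))
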
def register_Ns3ObjectDeleter_methods(root_module, cls):
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

def register_Ns3PacketMetadata_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [param('ns3::Buffer', 'buffer')], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment', 'ns3::PacketMetadata', [param('uint32_t', 'start'), param('uint32_t', 'end')], is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader', 'void', [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer', 'void', [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
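
# Illustrative sketch, not part of the generated bindings: packet metadata
# recording is off by default for speed; the static Enable()/EnableChecking()
# hooks wrapped above turn it on so packet contents can be traced and printed
# (module name assumed):
#
#   import ns.network
#   ns.network.PacketMetadata.Enable()   # call before any packets are created
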
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
    cls.add_constructor([])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
    cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
    cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
    cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
    cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
    cls.add_instance_attribute('isFragment', 'bool', is_const=False)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return

def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    ## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return

def register_Ns3PacketTagIterator_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return

def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    ## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return

def register_Ns3PacketTagList_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head', 'ns3::PacketTagList::TagData const *', [], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll', 'void', [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace', 'bool', [param('ns3::Tag &', 'tag')])
    return

def register_Ns3PacketTagListTagData_methods(root_module, cls):
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
    cls.add_instance_attribute('count', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
    cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
    cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
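
# Illustrative sketch, not part of the generated bindings: PacketTagList is
# the store behind Packet::AddPacketTag()/PeekPacketTag(), so from Python one
# normally works through Packet rather than the list itself (MyTag is a
# hypothetical ns3::Tag subclass):
#
#   import ns.network
#   pkt = ns.network.Packet(64)
#   tag = MyTag()                   # hypothetical Tag subclass
#   pkt.AddPacketTag(tag)
#   found = pkt.PeekPacketTag(tag)  # fills 'tag' in place, returns bool
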
def register_Ns3ParallelCommunicationInterface_methods(root_module, cls):
    ## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface::ParallelCommunicationInterface() [constructor]
    cls.add_constructor([])
    ## parallel-communication-interface.h (module 'mpi'): ns3::ParallelCommunicationInterface::ParallelCommunicationInterface(ns3::ParallelCommunicationInterface const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ParallelCommunicationInterface const &', 'arg0')])
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Destroy() [member function]
    cls.add_method('Destroy', 'void', [], is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Disable() [member function]
    cls.add_method('Disable', 'void', [], is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::Enable(int * pargc, char * * * pargv) [member function]
    cls.add_method('Enable', 'void', [param('int *', 'pargc'), param('char * * *', 'pargv')], is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): uint32_t ns3::ParallelCommunicationInterface::GetSize() [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): uint32_t ns3::ParallelCommunicationInterface::GetSystemId() [member function]
    cls.add_method('GetSystemId', 'uint32_t', [], is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): bool ns3::ParallelCommunicationInterface::IsEnabled() [member function]
    cls.add_method('IsEnabled', 'bool', [], is_pure_virtual=True, is_virtual=True)
    ## parallel-communication-interface.h (module 'mpi'): void ns3::ParallelCommunicationInterface::SendPacket(ns3::Ptr<ns3::Packet> p, ns3::Time const & rxTime, uint32_t node, uint32_t dev) [member function]
    cls.add_method('SendPacket', 'void', [param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Time const &', 'rxTime'), param('uint32_t', 'node'), param('uint32_t', 'dev')], is_pure_virtual=True, is_virtual=True)
    return

def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3Tag_methods(root_module, cls):
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3TagBuffer_methods(root_module, cls):
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    ## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
    cls.add_method('ReadDouble', 'double', [])
    ## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
    cls.add_method('ReadU16', 'uint16_t', [])
    ## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
    cls.add_method('ReadU32', 'uint32_t', [])
    ## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
    cls.add_method('ReadU64', 'uint64_t', [])
    ## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
    cls.add_method('ReadU8', 'uint8_t', [])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
    cls.add_method('WriteDouble', 'void', [param('double', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'v')])
    ## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'v')])
    return

def register_Ns3TimeWithUnit_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return
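
# Illustrative sketch, not part of the generated bindings: TimeWithUnit is
# the small helper returned by Time::As() and exists only for unit-aware
# printing via the output stream operator registered above (exact Python
# spelling assumed):
#
#   import ns.core
#   t = ns.core.Seconds(1.5)
#   print(t.As(ns.core.Time.MS))   # renders the same time expressed in ms
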
def register_Ns3TypeId_methods(root_module, cls):
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 'std::string', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 'std::string', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint32_t', 'i')], is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 'uint32_t', [], is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize', 'std::size_t', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('uint32_t', 'i')], is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 'uint32_t', [], is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint16_t', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 'ns3::TypeId', [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
    cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 'bool', [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
    cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')])
    return
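
# Illustrative sketch, not part of the generated bindings: the TypeId methods
# wrapped above drive ns-3's runtime type introspection, which works the same
# way from Python (module name assumed):
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName("ns3::Object")
#   for i in range(tid.GetAttributeN()):
#       print(tid.GetAttributeFullName(i))   # e.g. "ns3::Foo::Bar"
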
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return

def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return

def register_Ns3Empty_methods(root_module, cls):
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return

def register_Ns3Int64x64_t_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
    cls.add_constructor([param('long double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh', 'int64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow', 'uint64_t', [], is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return

def register_Ns3Chunk_methods(root_module, cls):
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
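
# Illustrative sketch, not part of the generated bindings: int64x64_t is the
# 64.64 fixed-point type behind ns3::Time arithmetic (built on double here,
# per int64x64-double.h). Hypothetical use of the wrapped operators:
#
#   import ns.core
#   a = ns.core.int64x64_t(1.5)
#   b = ns.core.int64x64_t(2)
#   print((a * b).GetDouble())   # -> 3.0
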
def register_Ns3Header_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3Object_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose', 'void', [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize', 'void', [])
    ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
    cls.add_method('IsInitialized', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize', 'void', [], visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate', 'void', [], visibility='protected', is_virtual=True)
    return
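
# Illustrative sketch, not part of the generated bindings: AggregateObject()
# wrapped above attaches independently created objects to one another so that
# each can later be looked up from the other by TypeId; GetAggregateIterator()
# walks the resulting aggregate. Hypothetical flow (module name assumed):
#
#   import ns.network
#   node = ns.network.Node()
#   node.AggregateObject(obj)   # 'obj' is any wrapped ns3::Object subclass
#   it = node.GetAggregateIterator()
#   while it.HasNext():
#       print(it.Next().GetInstanceTypeId().GetName())
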
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return

def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return

def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
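# --- Aside (not generated output): each register_Ns3SimpleRefCount__* helper
# above repeats the same three registrations for one ns3::SimpleRefCount<T>
# instantiation, which is what lets pybindgen hand these types around behind
# ns3::Ptr with intrusive reference counting. A hand-rolled equivalent for a
# single hypothetical wrapper 'cls' would look like this sketch:
def _example_simple_ref_count_registration(cls, cpp_name):
    # 'cpp_name' is the full C++ spelling of the instantiation, e.g.
    # 'ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > >'
    cls.add_constructor([])
    cls.add_constructor([param(cpp_name + ' const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)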
def register_Ns3Time_methods(root_module, cls):
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
    cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True)
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays', 'double', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears', 'double', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero', 'bool', [], is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max', 'ns3::Time', [], is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min', 'ns3::Time', [], is_static=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit', 'bool', [], is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    return
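# --- Illustrative sketch, not generated output. Assumes the compiled bindings
# import as 'ns.core' and that the Seconds/MilliSeconds helpers (bound
# elsewhere in ns-3, not in this file) are available.
def _example_time_usage():
    import ns.core
    t = ns.core.Seconds(1.5) + ns.core.MilliSeconds(250)  # operator+ registered above
    assert t > ns.core.Seconds(1)                         # comparison operators above
    return t.GetMilliSeconds()                            # -> 1750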
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
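# --- Illustrative sketch, not generated output. 'accessor', 'obj' and 'cb'
# are hypothetical: a concrete TraceSourceAccessor, the ObjectBase that owns
# the trace source, and an ns3::CallbackBase to attach; only the method names
# come from the registrations above.
def _example_trace_connect(accessor, obj, cb):
    if not accessor.ConnectWithoutContext(obj, cb):
        raise RuntimeError('trace source rejected the callback')
    # ... later, detach again:
    accessor.DisconnectWithoutContext(obj, cb)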
def register_Ns3Trailer_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
    cls.add_constructor([])
    ## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    ## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3AttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return

def register_Ns3CallbackChecker_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
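# --- Illustrative sketch, not generated output. 'value' and 'checker' are
# hypothetical concrete AttributeValue/AttributeChecker instances; the method
# names are the ones registered above, and the round trip mirrors how ns-3
# moves attribute values through their string form.
def _example_attribute_round_trip(value, checker):
    s = value.SerializeToString(checker)     # value -> string
    clone = checker.Create()                 # fresh value of the right type
    clone.DeserializeFromString(s, checker)  # string -> value
    return clone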
def register_Ns3CallbackImplBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return

def register_Ns3CallbackValue_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return

def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True)
    return

def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_virtual=True)
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, visibility='private', is_virtual=True)
    return
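# --- Illustrative sketch, not generated output. 'cb_base' is a hypothetical
# ns3::CallbackBase; CallbackValue (above) merely boxes it so a callback can
# travel through the attribute system like any other AttributeValue. 'ns.core'
# is the assumed import name for the compiled bindings.
def _example_callback_value(cb_base):
    import ns.core
    v = ns.core.CallbackValue(cb_base)
    v.Set(cb_base)   # equivalent, via the setter registered above
    return v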
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return

def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return

def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return

def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return

def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return

def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
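# --- Illustrative sketch, not generated output. Assumes the compiled bindings
# import as 'ns.network' and that Ipv4Address is registered elsewhere in this
# file with a dotted-string constructor (as it is in ns-3).
def _example_ipv4_address_value():
    import ns.network
    v = ns.network.Ipv4AddressValue(ns.network.Ipv4Address('10.1.1.1'))
    return v.Get()   # -> the boxed ns3::Ipv4Address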
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return

def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return

def register_Ns3Mac48AddressValue_methods(root_module, cls):
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return
def register_Ns3MpiReceiver_methods(root_module, cls):
    ## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver() [constructor]
    cls.add_constructor([])
    ## mpi-receiver.h (module 'mpi'): ns3::MpiReceiver::MpiReceiver(ns3::MpiReceiver const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::MpiReceiver const &', 'arg0')])
    ## mpi-receiver.h (module 'mpi'): static ns3::TypeId ns3::MpiReceiver::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::Receive(ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('Receive', 'void', [param('ns3::Ptr< ns3::Packet >', 'p')])
    ## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::SetReceiveCallback(ns3::Callback<void, ns3::Ptr<ns3::Packet>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
    cls.add_method('SetReceiveCallback', 'void', [param('ns3::Callback< void, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
    ## mpi-receiver.h (module 'mpi'): void ns3::MpiReceiver::DoDispose() [member function]
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return

def register_Ns3NixVector_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
    cls.add_constructor([])
    ## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    ## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
    cls.add_method('AddNeighborIndex', 'void', [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
    cls.add_method('BitCount', 'uint32_t', [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    ## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize', 'uint32_t', [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
    cls.add_method('ExtractNeighborIndex', 'uint32_t', [param('uint32_t', 'numberOfBits')])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
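# --- Illustrative sketch, not generated output. Assumes the compiled module
# imports as 'ns.mpi'; 'handler' stands in for an ns3::Callback<void,
# Ptr<Packet>, ...> built elsewhere (constructing one from a plain Python
# callable is not shown here).
def _example_mpi_receiver(handler):
    import ns.mpi
    recv = ns.mpi.MpiReceiver()
    recv.SetReceiveCallback(handler)   # registered above
    return recv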
def register_Ns3Packet_methods(root_module, cls):
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd', 'void', [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader', 'void', [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag', 'void', [param('ns3::Tag const &', 'tag')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer', 'void', [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem', 'ns3::PacketMetadata::ItemIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::Packet >', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment', 'ns3::Ptr< ns3::Packet >', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking', 'void', [], is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting', 'void', [], is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator', 'ns3::ByteTagIterator', [], is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator', 'ns3::PacketTagIterator', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid', 'uint64_t', [], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader', 'uint32_t', [param('ns3::Header &', 'header')], is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag', 'bool', [param('ns3::Tag &', 'tag')], is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags', 'void', [param('std::ostream &', 'os')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags', 'void', [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader', 'uint32_t', [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer', 'uint32_t', [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag', 'bool', [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector', 'void', [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString', 'std::string', [], is_const=True)
    return
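# --- Illustrative sketch, not generated output. Assumes the compiled bindings
# import as 'ns.network'; every method used is registered above.
def _example_packet_usage():
    import ns.network
    p = ns.network.Packet(64)         # 64-byte zero-filled payload
    frag = p.CreateFragment(0, 32)    # first half, as a new packet
    p.AddPaddingAtEnd(8)
    return p.GetSize(), frag.GetSize(), p.GetUid()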
def register_Ns3TimeValue_methods(root_module, cls):
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'value')])
    ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    ## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return

def register_Ns3TypeIdChecker_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return

def register_Ns3TypeIdValue_methods(root_module, cls):
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return

def register_Ns3AddressChecker_methods(root_module, cls):
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
    return

def register_Ns3AddressValue_methods(root_module, cls):
    ## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    ## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
    cls.add_constructor([param('ns3::Address const &', 'value')])
    ## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    ## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    ## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
    cls.add_method('Get', 'ns3::Address', [], is_const=True)
    ## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    ## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
    cls.add_method('Set', 'void', [param('ns3::Address const &', 'value')])
    return
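# --- Illustrative sketch, not generated output. Assumes 'ns.core' as the
# module name and the Seconds helper from ns-3; TimeValue, TypeIdValue and
# AddressValue above all follow the same boxed-value pattern.
def _example_time_value():
    import ns.core
    v = ns.core.TimeValue(ns.core.Seconds(2.0))
    return v.Get().GetSeconds()   # -> 2.0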
is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t 
ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module) register_functions_ns3_Hash(module.get_submodule('Hash'), root_module) register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()
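The register_* functions above follow the standard PyBindGen recipe that ns-3 uses for its generated Python bindings: each C++ class gets a block of add_constructor/add_method calls describing its signatures, and generate() emits the wrapper code. A minimal sketch of that recipe, assuming a hypothetical Counter class declared in counter.h:

import sys
from pybindgen import Module, FileCodeSink, param

mod = Module('example')                # name of the generated extension module
mod.add_include('"counter.h"')         # header that declares the wrapped class
cls = mod.add_class('Counter')
cls.add_constructor([])                                    # Counter::Counter()
cls.add_method('Increment', 'void', [param('int', 'step')])
cls.add_method('Get', 'int', [], is_const=True)
mod.generate(FileCodeSink(sys.stdout))  # write the C++ wrapper source to stdout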
GdZ/scriptfile
refs/heads/master
software/googleAppEngine/lib/webapp2/tests/resources/handlers.py
25
import webapp2


class LazyHandler(webapp2.RequestHandler):
    def get(self, **kwargs):
        self.response.out.write('I am a laaazy view.')


class CustomMethodHandler(webapp2.RequestHandler):
    def custom_method(self):
        self.response.out.write('I am a custom method.')


def handle_exception(request, response, exception):
    return webapp2.Response(body='Hello, custom response world!')
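A minimal routing sketch for the handlers above (the URL templates are made up); webapp2.Route's handler_method argument is what lets CustomMethodHandler.custom_method answer requests even though it is not named after an HTTP verb:

app = webapp2.WSGIApplication([
    webapp2.Route('/lazy', handler=LazyHandler),
    webapp2.Route('/custom', handler=CustomMethodHandler,
                  handler_method='custom_method'),
], debug=True)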
SnappleCap/oh-mainline
refs/heads/master
vendor/packages/gdata/tests/all_tests_local.py
41
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This module is used for version 2 of the Google Data APIs.

__author__ = 'j.s@google.com (Jeff Scudder)'

import unittest
import atom_tests.core_test  # used by suite() below
import all_tests
import gdata.test_config as conf

conf.options.set_value('runlive', 'false')


def suite():
    return unittest.TestSuite((atom_tests.core_test.suite(),))


if __name__ == '__main__':
    unittest.TextTestRunner().run(all_tests.suite())
rakeshmi/tempest
refs/heads/master
tempest/api_schema/response/compute/v2_1/floating_ips.py
17
# Copyright 2014 NEC Corporation.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.api_schema.response.compute.v2_1 import parameter_types

common_floating_ip_info = {
    'type': 'object',
    'properties': {
        # NOTE: Now the type of 'id' is integer, but
        # here allows 'string' also because we will be
        # able to change it to 'uuid' in the future.
        'id': {'type': ['integer', 'string']},
        'pool': {'type': ['string', 'null']},
        'instance_id': {'type': ['string', 'null']},
        'ip': parameter_types.ip_address,
        'fixed_ip': parameter_types.ip_address
    },
    'additionalProperties': False,
    'required': ['id', 'pool', 'instance_id', 'ip', 'fixed_ip'],
}

list_floating_ips = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ips': {
                'type': 'array',
                'items': common_floating_ip_info
            },
        },
        'additionalProperties': False,
        'required': ['floating_ips'],
    }
}

create_get_floating_ip = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ip': common_floating_ip_info
        },
        'additionalProperties': False,
        'required': ['floating_ip'],
    }
}

list_floating_ip_pools = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ip_pools': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'name': {'type': 'string'}
                    },
                    'additionalProperties': False,
                    'required': ['name'],
                }
            }
        },
        'additionalProperties': False,
        'required': ['floating_ip_pools'],
    }
}

add_remove_floating_ip = {
    'status_code': [202]
}

create_floating_ips_bulk = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ips_bulk_create': {
                'type': 'object',
                'properties': {
                    'interface': {'type': ['string', 'null']},
                    'ip_range': {'type': 'string'},
                    'pool': {'type': ['string', 'null']},
                },
                'additionalProperties': False,
                'required': ['interface', 'ip_range', 'pool'],
            }
        },
        'additionalProperties': False,
        'required': ['floating_ips_bulk_create'],
    }
}

delete_floating_ips_bulk = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ips_bulk_delete': {'type': 'string'}
        },
        'additionalProperties': False,
        'required': ['floating_ips_bulk_delete'],
    }
}

list_floating_ips_bulk = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ip_info': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'address': parameter_types.ip_address,
                        'instance_uuid': {'type': ['string', 'null']},
                        'interface': {'type': ['string', 'null']},
                        'pool': {'type': ['string', 'null']},
                        'project_id': {'type': ['string', 'null']},
                        'fixed_ip': parameter_types.ip_address
                    },
                    'additionalProperties': False,
                    # NOTE: fixed_ip is introduced after JUNO release,
                    # So it is not defined as 'required'.
                    'required': ['address', 'instance_uuid', 'interface',
                                 'pool', 'project_id'],
                }
            }
        },
        'additionalProperties': False,
        'required': ['floating_ip_info'],
    }
}
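A hedged sketch of exercising one of these schemas directly with the jsonschema package (the payload below is made up); Tempest's own checker additionally matches status_code, which plain jsonschema does not:

import jsonschema

body = {'floating_ips': [{'id': 1, 'pool': 'public', 'instance_id': None,
                          'ip': '10.0.0.5', 'fixed_ip': '192.168.0.2'}]}
# Raises jsonschema.ValidationError if the body does not match the schema.
jsonschema.validate(body, list_floating_ips['response_body'])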
GdZ/scriptfile
refs/heads/master
software/googleAppEngine/lib/django_1_3/tests/regressiontests/middleware/urls.py
122
from django.conf.urls.defaults import patterns

urlpatterns = patterns('',
    (r'^noslash$', 'view'),
    (r'^slash/$', 'view'),
    (r'^needsquoting#/$', 'view'),
)
ThirdProject/android_external_chromium_org
refs/heads/cm-11.0
tools/telemetry/telemetry/core/__init__.py
461
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
waytai/odoo
refs/heads/8.0
addons/product_extended/product_extended.py
185
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2014 OpenERP S.A. (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields from openerp.osv import osv class product_template(osv.osv): _name = 'product.template' _inherit = 'product.template' def compute_price(self, cr, uid, product_ids, template_ids=False, recursive=False, test=False, real_time_accounting = False, context=None): ''' Will return test dict when the test = False Multiple ids at once? testdict is used to inform the user about the changes to be made ''' testdict = {} if product_ids: ids = product_ids model = 'product.product' else: ids = template_ids model = 'product.template' for prod_id in ids: bom_obj = self.pool.get('mrp.bom') if model == 'product.product': bom_id = bom_obj._bom_find(cr, uid, product_id=prod_id, context=context) else: bom_id = bom_obj._bom_find(cr, uid, product_tmpl_id=prod_id, context=context) if bom_id: # In recursive mode, it will first compute the prices of child boms if recursive: #Search the products that are components of this bom of prod_id bom = bom_obj.browse(cr, uid, bom_id, context=context) #Call compute_price on these subproducts prod_set = set([x.product_id.id for x in bom.bom_line_ids]) res = self.compute_price(cr, uid, list(prod_set), recursive=recursive, test=test, real_time_accounting = real_time_accounting, context=context) if test: testdict.update(res) #Use calc price to calculate and put the price on the product of the BoM if necessary price = self._calc_price(cr, uid, bom_obj.browse(cr, uid, bom_id, context=context), test=test, real_time_accounting = real_time_accounting, context=context) if test: testdict.update({prod_id : price}) if test: return testdict else: return True def _calc_price(self, cr, uid, bom, test = False, real_time_accounting=False, context=None): if context is None: context={} price = 0 uom_obj = self.pool.get("product.uom") tmpl_obj = self.pool.get('product.template') for sbom in bom.bom_line_ids: my_qty = sbom.product_qty / sbom.product_efficiency if not sbom.attribute_value_ids: # No attribute_value_ids means the bom line is not variant specific price += uom_obj._compute_price(cr, uid, sbom.product_id.uom_id.id, sbom.product_id.standard_price, sbom.product_uom.id) * my_qty if bom.routing_id: for wline in bom.routing_id.workcenter_lines: wc = wline.workcenter_id cycle = wline.cycle_nbr hour = (wc.time_start + wc.time_stop + cycle * wc.time_cycle) * (wc.time_efficiency or 1.0) price += wc.costs_cycle * cycle + wc.costs_hour * hour price = self.pool.get('product.uom')._compute_price(cr,uid,bom.product_uom.id, price, bom.product_id.uom_id.id) #Convert on product UoM quantities if price > 0: price = uom_obj._compute_price(cr, uid, bom.product_uom.id, 
price / bom.product_qty, bom.product_id.uom_id.id) product = tmpl_obj.browse(cr, uid, bom.product_tmpl_id.id, context=context) if not test: if (product.valuation != "real_time" or not real_time_accounting): tmpl_obj.write(cr, uid, [product.id], {'standard_price' : price}, context=context) else: #Call wizard function here wizard_obj = self.pool.get("stock.change.standard.price") ctx = context.copy() ctx.update({'active_id': product.id, 'active_model': 'product.template'}) wiz_id = wizard_obj.create(cr, uid, {'new_price': price}, context=ctx) wizard_obj.change_price(cr, uid, [wiz_id], context=ctx) return price class product_bom(osv.osv): _inherit = 'mrp.bom' _columns = { 'standard_price': fields.related('product_tmpl_id','standard_price',type="float",relation="product.product",string="Standard Price",store=False) } product_bom() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
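A hedged, ORM-free sketch of the arithmetic inside _calc_price: each BoM line contributes its unit cost scaled by quantity over efficiency, and each routing work line adds cycle and hour costs (rollup_cost and its argument shapes are illustrative, not part of the module):

def rollup_cost(components, work_lines):
    # components: (qty, efficiency, unit_cost) triples
    # work_lines: (cycles, hours, cost_per_cycle, cost_per_hour) tuples
    price = sum(qty / eff * unit_cost for qty, eff, unit_cost in components)
    price += sum(c * cc + h * ch for c, h, cc, ch in work_lines)
    return price

print rollup_cost([(2.0, 1.0, 5.0)], [(1.0, 0.5, 3.0, 10.0)])  # 2*5 + (3 + 5) = 18.0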
satary/fretty
refs/heads/master
image_widget.py
2
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (C) Grigoriy A. Armeev, 2015
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License v2 for more details.
# Cheers, Satary.
#
# This widget provides a simple interface for showing pictures.
# It can be resized (keeping the aspect ratio) and it provides signals
# for mouse click and hover.

import sys, os
from PyQt4 import QtGui, QtCore

''' This widget implements folders and pictures '''


class ImageWidget(QtGui.QWidget):
    def __init__(self, parent=None):
        super(ImageWidget, self).__init__()
        mainLayout = QtGui.QVBoxLayout(self)
        mainLayout.setSpacing(0)
        mainLayout.setContentsMargins(0, 0, 0, 0)
        self.label = CustomLabel(self)
        mainLayout.addWidget(self.label)
        self.connect(self.label, QtCore.SIGNAL("mousePressSignal"), self.emitWigetPressedSignal)
        self.connect(self.label, QtCore.SIGNAL("mouseHoverSignal"), self.emitWigetHoveredSignal)

    def setImage(self, path):
        '''
        Reads an image from path and sets it on the widget.
        '''
        self.currentPath = self.label.path = path
        self.pixmap = QtGui.QPixmap(path)
        self.label.initsize = self.pixmap.size()
        size = self.size()  # -QtCore.QSize(20,20)
        self.resizeImage(size)

    def emitWigetPressedSignal(self, coord, path):
        '''
        Emits the signal imageWigetPressed, which provides coordinates in
        pixels (as on the initial picture) and the path to that picture.
        '''
        self.emit(QtCore.SIGNAL("imageWigetPressed"), coord, path)

    def emitWigetHoveredSignal(self, coord, path):
        '''
        Emits the signal imageWigetHovered, which provides coordinates in
        pixels (as on the initial picture) and the path to that picture.
        '''
        self.emit(QtCore.SIGNAL("imageWigetHovered"), coord, path)

    def resizeEvent(self, resizeEvent):
        '''
        Overrides QWidget's resizeEvent for better-quality resizing
        of the pixmap.
        '''
        super(ImageWidget, self).resizeEvent(resizeEvent)
        self.resizeImage(resizeEvent.size())

    def resizeImage(self, size):
        '''
        Resizes the image, keeping the aspect ratio.
        '''
        try:
            self.label.resize(size)
            pixmap = self.pixmap.scaled(size, QtCore.Qt.KeepAspectRatio)
            self.label.setPixmap(pixmap)
            self.label.update()
            self.label.picSize = pixmap.size()
        except:
            self.label.setText('No image loaded.')


class CustomLabel(QtGui.QLabel):
    '''
    This class provides a modified QLabel which emits QSignals with
    coordinates in the coordinate system of the initial image.
    Provides a signal for mouse hovering.
    '''
    def __init__(self, parent=None, path=None):
        super(CustomLabel, self).__init__(parent)
        self.path = path
        self.setMouseTracking(True)
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.hoverMouse)
        self.setAlignment(QtCore.Qt.AlignCenter)
        self.setSizePolicy(QtGui.QSizePolicy.Ignored, QtGui.QSizePolicy.Ignored)

    def calcCoord(self, x, y):
        # Map label coordinates back to pixel coordinates of the
        # original (unscaled) image.
        lblwidth = self.size().width()
        picwidth = self.picSize.width()
        lblheight = self.size().height()
        picheight = self.picSize.height()
        initwidth = self.initsize.width()
        initheight = self.initsize.height()
        x = int((x - (lblwidth - picwidth) / 2.0) * initwidth / picwidth)
        y = int((y - (lblheight - picheight) / 2.0) * initheight / picheight)
        if (x > 0) and (y > 0) and (x < initwidth) and (y < initheight):
            return x, y
        else:
            return None

    def mousePressEvent(self, e):
        try:
            super(CustomLabel, self).mousePressEvent(e)
            coord = self.calcCoord(e.x(), e.y())
            if coord != None:
                self.coord = coord
                self.emit(QtCore.SIGNAL("mousePressSignal"), self.coord, self.path)
        except:
            pass

    def mouseMoveEvent(self, e):
        try:
            super(CustomLabel, self).mouseMoveEvent(e)
            self.coord = self.calcCoord(e.x(), e.y())
            self.timer.stop()
            self.timer.start(600)
        except:
            pass

    def hoverMouse(self):
        self.timer.stop()
        if self.underMouse() and (self.coord != None):
            self.emit(QtCore.SIGNAL("mouseHoverSignal"), self.coord, self.path)
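A hedged usage sketch for the widget ('test.png' is a placeholder path); the old-style connect call receives the coordinates and path emitted by emitWigetPressedSignal:

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    widget = ImageWidget()
    widget.setImage('test.png')
    # Print original-image pixel coordinates on each click.
    widget.connect(widget, QtCore.SIGNAL("imageWigetPressed"),
                   lambda coord, path: sys.stdout.write('%s %s\n' % (coord, path)))
    widget.show()
    sys.exit(app.exec_())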
zaina/nova
refs/heads/master
nova/api/openstack/compute/contrib/extended_volumes.py
56
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The Extended Volumes API extension."""

from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import objects

authorize = extensions.soft_extension_authorizer('compute', 'extended_volumes')


class ExtendedVolumesController(wsgi.Controller):
    def __init__(self, *args, **kwargs):
        super(ExtendedVolumesController, self).__init__(*args, **kwargs)

    def _extend_server(self, context, server, instance):
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        volume_ids = [bdm.volume_id for bdm in bdms if bdm.volume_id]
        key = "%s:volumes_attached" % Extended_volumes.alias
        server[key] = [{'id': volume_id} for volume_id in volume_ids]

    @wsgi.extends
    def show(self, req, resp_obj, id):
        context = req.environ['nova.context']
        if authorize(context):
            server = resp_obj.obj['server']
            db_instance = req.get_db_instance(server['id'])
            # server['id'] is guaranteed to be in the cache due to
            # the core API adding it in its 'show' method.
            self._extend_server(context, server, db_instance)

    @wsgi.extends
    def detail(self, req, resp_obj):
        context = req.environ['nova.context']
        if authorize(context):
            servers = list(resp_obj.obj['servers'])
            for server in servers:
                db_instance = req.get_db_instance(server['id'])
                # server['id'] is guaranteed to be in the cache due to
                # the core API adding it in its 'detail' method.
                self._extend_server(context, server, db_instance)


class Extended_volumes(extensions.ExtensionDescriptor):
    """Extended Volumes support."""

    name = "ExtendedVolumes"
    alias = "os-extended-volumes"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "extended_volumes/api/v1.1")
    updated = "2013-06-07T00:00:00Z"

    def get_controller_extensions(self):
        controller = ExtendedVolumesController()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]

    def get_resources(self):
        return []
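A hedged illustration (fake IDs) of the in-place decoration _extend_server performs on a server dict before it is serialized:

server = {'id': 'abc123', 'name': 'vm1'}
server['%s:volumes_attached' % Extended_volumes.alias] = [{'id': 'vol-1'}]
# server == {'id': 'abc123', 'name': 'vm1',
#            'os-extended-volumes:volumes_attached': [{'id': 'vol-1'}]}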
Pablo126/SSBW
refs/heads/master
Entrega1/lib/python3.5/site-packages/django/conf/locale/mk/formats.py
504
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',      # '25.10.2006', '25.10.06'
    '%d. %m. %Y', '%d. %m. %y',  # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',         # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',      # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',            # '25.10.2006 14:30'
    '%d.%m.%Y',                  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',         # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',      # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',            # '25.10.06 14:30'
    '%d.%m.%y',                  # '25.10.06'
    '%d. %m. %Y %H:%M:%S',       # '25. 10. 2006 14:30:59'
    '%d. %m. %Y %H:%M:%S.%f',    # '25. 10. 2006 14:30:59.000200'
    '%d. %m. %Y %H:%M',          # '25. 10. 2006 14:30'
    '%d. %m. %Y',                # '25. 10. 2006'
    '%d. %m. %y %H:%M:%S',       # '25. 10. 06 14:30:59'
    '%d. %m. %y %H:%M:%S.%f',    # '25. 10. 06 14:30:59.000200'
    '%d. %m. %y %H:%M',          # '25. 10. 06 14:30'
    '%d. %m. %y',                # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
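A quick sanity check (the date is made up) that the first input format round-trips a Macedonian-style date string:

from datetime import datetime

assert datetime.strptime('25.10.2006', DATE_INPUT_FORMATS[0]) == datetime(2006, 10, 25)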
OpringaoDoTurno/airflow
refs/heads/master
airflow/security/kerberos.py
5
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import subprocess
import sys
import time

from airflow import configuration, LoggingMixin

NEED_KRB181_WORKAROUND = None

log = LoggingMixin().log


def renew_from_kt():
    # The config is specified in seconds. But we ask for that same amount in
    # minutes to give ourselves a large renewal buffer.
    renewal_lifetime = "%sm" % configuration.getint('kerberos', 'reinit_frequency')
    principal = configuration.get('kerberos', 'principal').replace("_HOST", socket.getfqdn())

    cmdv = [configuration.get('kerberos', 'kinit_path'),
            "-r", renewal_lifetime,
            "-k",  # host ticket
            "-t", configuration.get('kerberos', 'keytab'),   # specify keytab
            "-c", configuration.get('kerberos', 'ccache'),   # specify credentials cache
            principal]
    log.info("Reinitting kerberos from keytab: " + " ".join(cmdv))

    subp = subprocess.Popen(cmdv,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            close_fds=True,
                            bufsize=-1,
                            universal_newlines=True)
    subp.wait()
    if subp.returncode != 0:
        log.error("Couldn't reinit from keytab! `kinit' exited with %s.\n%s\n%s" % (
            subp.returncode,
            "\n".join(subp.stdout.readlines()),
            "\n".join(subp.stderr.readlines())))
        sys.exit(subp.returncode)

    global NEED_KRB181_WORKAROUND
    if NEED_KRB181_WORKAROUND is None:
        NEED_KRB181_WORKAROUND = detect_conf_var()
    if NEED_KRB181_WORKAROUND:
        # (From: HUE-640). Kerberos clocks have second-level granularity. Make
        # sure we renew the ticket after the initial valid time.
        time.sleep(1.5)
        perform_krb181_workaround()


def perform_krb181_workaround():
    cmdv = [configuration.get('kerberos', 'kinit_path'),
            "-c", configuration.get('kerberos', 'ccache'),
            "-R"]  # Renew ticket_cache

    log.info("Renewing kerberos ticket to work around kerberos 1.8.1: " + " ".join(cmdv))

    ret = subprocess.call(cmdv, close_fds=True)

    if ret != 0:
        principal = "%s/%s" % (configuration.get('kerberos', 'principal'), socket.getfqdn())
        fmt_dict = dict(princ=principal,
                        ccache=configuration.get('kerberos', 'ccache'))
        log.error("Couldn't renew kerberos ticket in order to work around "
                  "Kerberos 1.8.1 issue. Please check that the ticket for "
                  "'%(princ)s' is still renewable:\n"
                  "  $ kinit -f -c %(ccache)s\n"
                  "If the 'renew until' date is the same as the 'valid starting' "
                  "date, the ticket cannot be renewed. Please check your KDC "
                  "configuration, and the ticket renewal policy (maxrenewlife) "
                  "for the '%(princ)s' and `krbtgt' principals." % fmt_dict)
        sys.exit(ret)


def detect_conf_var():
    """Return true if the ticket cache contains "conf" information as is found
    in ticket caches of Kerberos 1.8.1 or later. This is incompatible with the
    Sun Java Krb5LoginModule in Java6, so we need to take an action to work
    around it.
    """
    ticket_cache = configuration.get('kerberos', 'ccache')

    with open(ticket_cache, 'rb') as f:
        # Note: this file is binary, so we check against a bytearray.
        return b'X-CACHECONF:' in f.read()


def run():
    if configuration.get('kerberos', 'keytab') is None:
        log.debug("Keytab renewer not starting, no keytab configured")
        sys.exit(0)

    while True:
        renew_from_kt()
        time.sleep(configuration.getint('kerberos', 'reinit_frequency'))
chjw8016/GreenOdoo7-haibao
refs/heads/master
openerp/addons/account/wizard/account_unreconcile.py
56
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import osv


class account_unreconcile(osv.osv_memory):
    _name = "account.unreconcile"
    _description = "Account Unreconcile"

    def trans_unrec(self, cr, uid, ids, context=None):
        obj_move_line = self.pool.get('account.move.line')
        if context is None:
            context = {}
        if context.get('active_ids', False):
            obj_move_line._remove_move_reconcile(cr, uid, context['active_ids'], context=context)
        return {'type': 'ir.actions.act_window_close'}

account_unreconcile()


class account_unreconcile_reconcile(osv.osv_memory):
    _name = "account.unreconcile.reconcile"
    _description = "Account Unreconcile Reconcile"

    def trans_unrec_reconcile(self, cr, uid, ids, context=None):
        obj_move_reconcile = self.pool.get('account.move.reconcile')
        if context is None:
            context = {}
        rec_ids = context['active_ids']
        if rec_ids:
            obj_move_reconcile.unlink(cr, uid, rec_ids, context=context)
        return {'type': 'ir.actions.act_window_close'}

account_unreconcile_reconcile()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
sysadminmatmoz/odoo-clearcorp
refs/heads/8.0
account_partner_balance_report/report/account_partner_balance_report.py
3
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Addons modules by CLEARCORP S.A. # Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp import pooler from openerp.addons.account_report_lib.account_report_base import accountReportbase #Library Base class Parser(accountReportbase): def __init__(self, cr, uid, name, context): super(Parser, self).__init__(cr, uid, name, context=context) self.pool = pooler.get_pool(self.cr.dbname) self.localcontext.update({ 'cr': cr, 'uid': uid, 'context':context, 'storage':{}, 'set_data_template': self.set_data_template, 'get_print_name': self.get_print_name, 'partner_in_dictionary':self.partner_in_dictionary, 'get_data_by_partner':self.get_data_by_partner, 'define_debit_credit':self.define_debit_credit, 'error_message': self.error_message, 'compute_total_debit_credit':self.compute_total_debit_credit, 'compute_total_currency_company':self.compute_total_currency_company, 'get_company_user':self.get_company_user, 'get_currency_company_user_name': self.get_currency_company_user_name, 'compute_exchange_rate':self.compute_exchange_rate, }) """ 1. Get account_move_lines related with partner and classify by type #=========================================================================== # This method create a dictionary. In each key, it has another dictionary # that contains a list of move lines. This structure classified all move # lines first by currency and then, by partner #=========================================================================== For built_result method, the result could be iterated as follow: *result is a dictionary of dictionaries for partner, currency in result.iteritems(): print partner (in this case, it will display the partner's name) for currency, type in partner.iteritems(): print partner (in this case, it will currency's name) for type, lines in currency.iteritems(): print type (in this case, it will type's name, it would be Receivable or Payable for line in lines: print line.name, line.ref ... (lines is a list of move_lines) Also, return a list of partners sort by name. This is for show partners sort by name @param partner_list: It can be one partner o a list of them. The user would select one or many in form list view. """ def get_account_move_lines(self, cr, uid, objects, context=None): move_line_obj = self.pool.get('account.move.line') partner_ids = [] move_lines_ids = [] res = {} currency_company = self.pool.get('res.users').browse(cr, uid, [uid])[0].company_id.currency_id """ 1. Extract partner's id and then search move lines.""" for partner in objects: partner_ids.append(partner.id) """2. 
Search move lines that match with this partners """ move_lines_ids = move_line_obj.search(cr, uid, [('partner_id','in', partner_ids),('reconcile_id','=',False),('account_id.type','in',['payable','receivable'])], order = 'date ASC') move_lines = move_line_obj.browse(cr, uid, move_lines_ids) """ 3. Classified move_lines by partner and account_type """ #Classified move_lines by currency and partner for line in move_lines: if line.partner_id.id not in res.keys(): res[line.partner_id.id] = {} if line.account_id.type not in res[line.partner_id.id].keys(): res[line.partner_id.id][line.account_id.type] = {} if not line.currency_id: if currency_company.id not in res[line.partner_id.id][line.account_id.type].keys(): res[line.partner_id.id][line.account_id.type][currency_company.id] = [] res[line.partner_id.id][line.account_id.type][currency_company.id].append(line) else: if line.currency_id.id not in res[line.partner_id.id][line.account_id.type].keys(): res[line.partner_id.id][line.account_id.type][line.currency_id.id] = [] res[line.partner_id.id][line.account_id.type][line.currency_id.id].append(line) #Sort by name (alphabetically). In aeroo template iterate in browse #record that it's already sort. This method return the dictionary with #move_lines and a list of partners sorted. (ids) partner_ids_order = self.pool.get('res.partner').search(cr, uid, [('id','in', partner_ids)], order='name ASC') return res, partner_ids_order """ 2. Return the value for debit or credit for each line @param line: A account_move_line line object """ def define_debit_credit(self, line): debit = 0.0 credit = 0.0 res = {'debit':debit, 'credit':credit} currency_company = self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id #Use amount_currency in this case if (line.currency_id != currency_company) and line.currency_id: if line.amount_currency > 0: debit = line.amount_currency elif line.amount_currency < 0: credit = line.amount_currency * -1 res.update({'debit':debit, 'credit':credit}) #currency_id = False, company_id currency else: res.update({'debit':line.debit, 'credit':line.credit}) return res """ 3. Compute debit and credit column for each lines block @param lines: A list of lines. """ def compute_total_debit_credit(self, lines): debit = 0.0 credit = 0.0 res = {'debit':debit, 'credit':credit} currency_company = self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id for line in lines: #Use amount_currency in this case if (line.currency_id != currency_company) and line.currency_id: if line.amount_currency > 0: debit += line.amount_currency elif line.amount_currency < 0: credit += line.amount_currency * -1 #currency_id = False, company_id currency else: debit += line.debit credit += line.credit res.update({'debit':debit, 'credit':credit}) return res """ Compute total by type @param currency: A dictionary, it contains the type and amount per type """ def compute_total_currency_company(self, partner,type): debit = 0.0 credit = 0.0 amount = 0.0 currency_company = self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id.id type_dict = self.get_data_by_partner(partner)[type] for currency, lines in type_dict.iteritems(): result = self.compute_total_debit_credit(lines) #Compute all data for each type. 
#If type's currency is different from currency_company #the amount must be converted to currency_company if currency != currency_company: amount_to_convert = result['debit'] - result['credit'] amount += self.currency_convert_amount(self.cr, self.uid, currency, currency_company, amount_to_convert) else: subtotal = result['debit'] - result['credit'] amount += subtotal return amount def compute_exchange_rate(self, partner,type, context): debit = 0.0 credit = 0.0 amount = 0.0 currency_company = self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id type_dict = self.get_data_by_partner(partner)[type] for currency, lines in type_dict.iteritems(): currency_current = self.pool.get('res.currency').browse(self.cr, self.uid, currency) conversion_rate_str = self.get_conversion_rate(self.cr, self.uid,currency_current, currency_company, context) return conversion_rate_str #================ AUXILIAR FUNCTIONS ======================================= """ Get exchange rate for today between currency and company's currency for a specific amount. @param initial_currency: it must be a currency id @param final_currency: it must be a currency id @param amount: amount to convert """ def currency_convert_amount(self, cr, uid, initial_currency, final_currency, amount, context=None): res_currency = self.pool.get('res.currency') exchange_rate = res_currency.compute(cr, uid, initial_currency, final_currency, amount) return exchange_rate """ Return a specific name for a id, this method if for print the name in aeroo template @param id: id . It must be a number @param type: the type of register that matches with the id. A model for OpenERP system @param type_name: Specific parameter for account's type. In this case, it could be receivable or payable """ def get_print_name(self, id, type='', type_name='', context=None): if type == 'partner': partner = self.pool.get('res.partner').browse(self.cr, self.uid, id) return partner.name #for this case, 'id' parameter is a string if type == 'account_type': if id == 'payable': return _('Payable') elif id == 'receivable': return _('Receivable') if type == 'currency': return self.pool.get('res.currency').browse(self.cr, self.uid, id, context=context).name """ Return a conversion rate for today's date @param initial_currency: It must be a browse record @param final_currency: It must be a browse record """ def get_conversion_rate(self, cr, uid, initial_currency, final_currency, context): copy_context = context now = time.strftime('%Y-%m-%d') conversion_rate = initial_currency.get_exchange_rate(final_currency, now) now = time.strftime('%d-%m-%Y') conversion_rate_str = now + ' '+ final_currency.symbol + ' ' + str(conversion_rate[0]) return conversion_rate_str #================== METHODS TO SET AND GET DATA ===========================# """ Set data to use in odt template """ def set_data_template(self, objects): result, partner_ids_order = self.get_account_move_lines(self.cr, self.uid, objects,context=None) dict_update = {'result': result, 'partner_ids_order': partner_ids_order,} self.localcontext['storage'].update(dict_update) return False """ Return a dictionary, with this structure: result[account_type][currency] = move_list_lines """ def get_data_by_partner(self, partner_id): return self.localcontext['storage']['result'][partner_id] """ Avoid to show partners without lines and partner that doesn't match with currency """ def partner_in_dictionary(self, partner): if partner in self.localcontext['storage']['result'].keys(): return True else: return 
False #Error message for report def error_message(self): return _("For this partner, doesn't exist payable or receivable pending invoices ") #Return company for logged user def get_currency_company_user_name(self): currency_company = self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id.name return currency_company def get_company_user(self): currency_company = self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.name return currency_company class report_partnerledger(osv.AbstractModel): _name = 'report.account_partner_balance_report.report_partner_balance' _inherit = 'report.abstract_report' _template = 'account_partner_balance_report.report_partner_balance' _wrapped_report_class = Parser
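A hedged traversal sketch over the nested dict built by get_account_move_lines (res is assumed to be the first element of that method's return value; the browse records and ids are illustrative). It mirrors the iteration order described in the method's docstring:

for partner_id, types in res.iteritems():
    for acc_type, currencies in types.iteritems():
        for currency_id, lines in currencies.iteritems():
            for line in lines:
                print partner_id, acc_type, currency_id, line.name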
phoebusliang/parallel-lettuce
refs/heads/master
tests/integration/lib/Django-1.3/django/contrib/sessions/backends/cached_db.py
270
""" Cached, database-backed sessions. """ from django.conf import settings from django.contrib.sessions.backends.db import SessionStore as DBStore from django.core.cache import cache class SessionStore(DBStore): """ Implements cached, database backed sessions. """ def __init__(self, session_key=None): super(SessionStore, self).__init__(session_key) def load(self): data = cache.get(self.session_key, None) if data is None: data = super(SessionStore, self).load() cache.set(self.session_key, data, settings.SESSION_COOKIE_AGE) return data def exists(self, session_key): return super(SessionStore, self).exists(session_key) def save(self, must_create=False): super(SessionStore, self).save(must_create) cache.set(self.session_key, self._session, settings.SESSION_COOKIE_AGE) def delete(self, session_key=None): super(SessionStore, self).delete(session_key) cache.delete(session_key or self.session_key) def flush(self): """ Removes the current session data from the database and regenerates the key. """ self.clear() self.delete(self.session_key) self.create()
jkshaver/virtualenv-1.8.2
refs/heads/master
env/lib/python2.7/site-packages/django/template/defaulttags.py
50
"""Default tags used by the template system, available to all templates.""" import sys import re from datetime import datetime from itertools import groupby, cycle as itertools_cycle from django.conf import settings from django.template.base import (Node, NodeList, Template, Library, TemplateSyntaxError, VariableDoesNotExist, InvalidTemplateLibrary, BLOCK_TAG_START, BLOCK_TAG_END, VARIABLE_TAG_START, VARIABLE_TAG_END, SINGLE_BRACE_START, SINGLE_BRACE_END, COMMENT_TAG_START, COMMENT_TAG_END, VARIABLE_ATTRIBUTE_SEPARATOR, get_library, token_kwargs, kwarg_re) from django.template.smartif import IfParser, Literal from django.template.defaultfilters import date from django.utils.encoding import smart_str, smart_unicode from django.utils.safestring import mark_safe from django.utils import timezone register = Library() class AutoEscapeControlNode(Node): """Implements the actions of the autoescape tag.""" def __init__(self, setting, nodelist): self.setting, self.nodelist = setting, nodelist def render(self, context): old_setting = context.autoescape context.autoescape = self.setting output = self.nodelist.render(context) context.autoescape = old_setting if self.setting: return mark_safe(output) else: return output class CommentNode(Node): def render(self, context): return '' class CsrfTokenNode(Node): def render(self, context): csrf_token = context.get('csrf_token', None) if csrf_token: if csrf_token == 'NOTPROVIDED': return mark_safe(u"") else: return mark_safe(u"<div style='display:none'><input type='hidden' name='csrfmiddlewaretoken' value='%s' /></div>" % csrf_token) else: # It's very probable that the token is missing because of # misconfiguration, so we raise a warning from django.conf import settings if settings.DEBUG: import warnings warnings.warn("A {% csrf_token %} was used in a template, but the context did not provide the value. This is usually caused by not using RequestContext.") return u'' class CycleNode(Node): def __init__(self, cyclevars, variable_name=None, silent=False): self.cyclevars = cyclevars self.variable_name = variable_name self.silent = silent def render(self, context): if self not in context.render_context: # First time the node is rendered in template context.render_context[self] = itertools_cycle(self.cyclevars) cycle_iter = context.render_context[self] value = cycle_iter.next().resolve(context) if self.variable_name: context[self.variable_name] = value if self.silent: return '' return value class DebugNode(Node): def render(self, context): from pprint import pformat output = [pformat(val) for val in context] output.append('\n\n') output.append(pformat(sys.modules)) return ''.join(output) class FilterNode(Node): def __init__(self, filter_expr, nodelist): self.filter_expr, self.nodelist = filter_expr, nodelist def render(self, context): output = self.nodelist.render(context) # Apply filters. 
context.update({'var': output}) filtered = self.filter_expr.resolve(context) context.pop() return filtered class FirstOfNode(Node): def __init__(self, vars): self.vars = vars def render(self, context): for var in self.vars: value = var.resolve(context, True) if value: return smart_unicode(value) return u'' class ForNode(Node): child_nodelists = ('nodelist_loop', 'nodelist_empty') def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None): self.loopvars, self.sequence = loopvars, sequence self.is_reversed = is_reversed self.nodelist_loop = nodelist_loop if nodelist_empty is None: self.nodelist_empty = NodeList() else: self.nodelist_empty = nodelist_empty def __repr__(self): reversed_text = self.is_reversed and ' reversed' or '' return "<For Node: for %s in %s, tail_len: %d%s>" % \ (', '.join(self.loopvars), self.sequence, len(self.nodelist_loop), reversed_text) def __iter__(self): for node in self.nodelist_loop: yield node for node in self.nodelist_empty: yield node def render(self, context): if 'forloop' in context: parentloop = context['forloop'] else: parentloop = {} context.push() try: values = self.sequence.resolve(context, True) except VariableDoesNotExist: values = [] if values is None: values = [] if not hasattr(values, '__len__'): values = list(values) len_values = len(values) if len_values < 1: context.pop() return self.nodelist_empty.render(context) nodelist = NodeList() if self.is_reversed: values = reversed(values) unpack = len(self.loopvars) > 1 # Create a forloop value in the context. We'll update counters on each # iteration just below. loop_dict = context['forloop'] = {'parentloop': parentloop} for i, item in enumerate(values): # Shortcuts for current loop iteration number. loop_dict['counter0'] = i loop_dict['counter'] = i+1 # Reverse counter iteration numbers. loop_dict['revcounter'] = len_values - i loop_dict['revcounter0'] = len_values - i - 1 # Boolean values designating first and last times through loop. loop_dict['first'] = (i == 0) loop_dict['last'] = (i == len_values - 1) pop_context = False if unpack: # If there are multiple loop variables, unpack the item into # them. try: unpacked_vars = dict(zip(self.loopvars, item)) except TypeError: pass else: pop_context = True context.update(unpacked_vars) else: context[self.loopvars[0]] = item # In TEMPLATE_DEBUG mode provide source of the node which # actually raised the exception if settings.TEMPLATE_DEBUG: for node in self.nodelist_loop: try: nodelist.append(node.render(context)) except Exception, e: if not hasattr(e, 'django_template_source'): e.django_template_source = node.source raise else: for node in self.nodelist_loop: nodelist.append(node.render(context)) if pop_context: # The loop variables were pushed on to the context so pop them # off again. This is necessary because the tag lets the length # of loopvars differ to the length of each set of items and we # don't want to leave any vars from the previous loop on the # context. context.pop() context.pop() return nodelist.render(context) class IfChangedNode(Node): child_nodelists = ('nodelist_true', 'nodelist_false') def __init__(self, nodelist_true, nodelist_false, *varlist): self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false self._last_seen = None self._varlist = varlist self._id = str(id(self)) def render(self, context): if 'forloop' in context and self._id not in context['forloop']: self._last_seen = None context['forloop'][self._id] = 1 try: if self._varlist: # Consider multiple parameters. 
This automatically behaves # like an OR evaluation of the multiple variables. compare_to = [var.resolve(context, True) for var in self._varlist] else: compare_to = self.nodelist_true.render(context) except VariableDoesNotExist: compare_to = None if compare_to != self._last_seen: self._last_seen = compare_to content = self.nodelist_true.render(context) return content elif self.nodelist_false: return self.nodelist_false.render(context) return '' class IfEqualNode(Node): child_nodelists = ('nodelist_true', 'nodelist_false') def __init__(self, var1, var2, nodelist_true, nodelist_false, negate): self.var1, self.var2 = var1, var2 self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false self.negate = negate def __repr__(self): return "<IfEqualNode>" def render(self, context): val1 = self.var1.resolve(context, True) val2 = self.var2.resolve(context, True) if (self.negate and val1 != val2) or (not self.negate and val1 == val2): return self.nodelist_true.render(context) return self.nodelist_false.render(context) class IfNode(Node): def __init__(self, conditions_nodelists): self.conditions_nodelists = conditions_nodelists def __repr__(self): return "<IfNode>" def __iter__(self): for _, nodelist in self.conditions_nodelists: for node in nodelist: yield node @property def nodelist(self): return NodeList(node for _, nodelist in self.conditions_nodelists for node in nodelist) def render(self, context): for condition, nodelist in self.conditions_nodelists: if condition is not None: # if / elif clause try: match = condition.eval(context) except VariableDoesNotExist: match = None else: # else clause match = True if match: return nodelist.render(context) return '' class RegroupNode(Node): def __init__(self, target, expression, var_name): self.target, self.expression = target, expression self.var_name = var_name def resolve_expression(self, obj, context): # This method is called for each object in self.target. See regroup() # for the reason why we temporarily put the object in the context. context[self.var_name] = obj return self.expression.resolve(context, True) def render(self, context): obj_list = self.target.resolve(context, True) if obj_list == None: # target variable wasn't found in context; fail silently. context[self.var_name] = [] return '' # List of dictionaries in the format: # {'grouper': 'key', 'list': [list of contents]}. context[self.var_name] = [ {'grouper': key, 'list': list(val)} for key, val in groupby(obj_list, lambda obj: self.resolve_expression(obj, context)) ] return '' def include_is_allowed(filepath): for root in settings.ALLOWED_INCLUDE_ROOTS: if filepath.startswith(root): return True return False class SsiNode(Node): def __init__(self, filepath, parsed, legacy_filepath=True): self.filepath = filepath self.parsed = parsed self.legacy_filepath = legacy_filepath def render(self, context): filepath = self.filepath if not self.legacy_filepath: filepath = filepath.resolve(context) if not include_is_allowed(filepath): if settings.DEBUG: return "[Didn't have permission to include file]" else: return '' # Fail silently for invalid includes. try: fp = open(filepath, 'r') output = fp.read() fp.close() except IOError: output = '' if self.parsed: try: t = Template(output, name=filepath) return t.render(context) except TemplateSyntaxError, e: if settings.DEBUG: return "[Included template had syntax error: %s]" % e else: return '' # Fail silently for invalid included templates. 
return output class LoadNode(Node): def render(self, context): return '' class NowNode(Node): def __init__(self, format_string): self.format_string = format_string def render(self, context): tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None return date(datetime.now(tz=tzinfo), self.format_string) class SpacelessNode(Node): def __init__(self, nodelist): self.nodelist = nodelist def render(self, context): from django.utils.html import strip_spaces_between_tags return strip_spaces_between_tags(self.nodelist.render(context).strip()) class TemplateTagNode(Node): mapping = {'openblock': BLOCK_TAG_START, 'closeblock': BLOCK_TAG_END, 'openvariable': VARIABLE_TAG_START, 'closevariable': VARIABLE_TAG_END, 'openbrace': SINGLE_BRACE_START, 'closebrace': SINGLE_BRACE_END, 'opencomment': COMMENT_TAG_START, 'closecomment': COMMENT_TAG_END, } def __init__(self, tagtype): self.tagtype = tagtype def render(self, context): return self.mapping.get(self.tagtype, '') class URLNode(Node): def __init__(self, view_name, args, kwargs, asvar, legacy_view_name=True): self.view_name = view_name self.legacy_view_name = legacy_view_name self.args = args self.kwargs = kwargs self.asvar = asvar def render(self, context): from django.core.urlresolvers import reverse, NoReverseMatch args = [arg.resolve(context) for arg in self.args] kwargs = dict([(smart_str(k, 'ascii'), v.resolve(context)) for k, v in self.kwargs.items()]) view_name = self.view_name if not self.legacy_view_name: view_name = view_name.resolve(context) # Try to look up the URL twice: once given the view name, and again # relative to what we guess is the "main" app. If they both fail, # re-raise the NoReverseMatch unless we're using the # {% url ... as var %} construct in which cause return nothing. url = '' try: url = reverse(view_name, args=args, kwargs=kwargs, current_app=context.current_app) except NoReverseMatch, e: if settings.SETTINGS_MODULE: project_name = settings.SETTINGS_MODULE.split('.')[0] try: url = reverse(project_name + '.' + view_name, args=args, kwargs=kwargs, current_app=context.current_app) except NoReverseMatch: if self.asvar is None: # Re-raise the original exception, not the one with # the path relative to the project. This makes a # better error message. raise e else: if self.asvar is None: raise e if self.asvar: context[self.asvar] = url return '' else: return url class WidthRatioNode(Node): def __init__(self, val_expr, max_expr, max_width): self.val_expr = val_expr self.max_expr = max_expr self.max_width = max_width def render(self, context): try: value = self.val_expr.resolve(context) max_value = self.max_expr.resolve(context) max_width = int(self.max_width.resolve(context)) except VariableDoesNotExist: return '' except ValueError: raise TemplateSyntaxError("widthratio final argument must be an number") try: value = float(value) max_value = float(max_value) ratio = (value / max_value) * max_width except ZeroDivisionError: return '0' except ValueError: return '' return str(int(round(ratio))) class WithNode(Node): def __init__(self, var, name, nodelist, extra_context=None): self.nodelist = nodelist # var and name are legacy attributes, being left in case they are used # by third-party subclasses of this Node. 
self.extra_context = extra_context or {} if name: self.extra_context[name] = var def __repr__(self): return "<WithNode>" def render(self, context): values = dict([(key, val.resolve(context)) for key, val in self.extra_context.iteritems()]) context.update(values) output = self.nodelist.render(context) context.pop() return output @register.tag def autoescape(parser, token): """ Force autoescape behavior for this block. """ args = token.contents.split() if len(args) != 2: raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.") arg = args[1] if arg not in (u'on', u'off'): raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'") nodelist = parser.parse(('endautoescape',)) parser.delete_first_token() return AutoEscapeControlNode((arg == 'on'), nodelist) @register.tag def comment(parser, token): """ Ignores everything between ``{% comment %}`` and ``{% endcomment %}``. """ parser.skip_past('endcomment') return CommentNode() @register.tag def cycle(parser, token): """ Cycles among the given strings each time this tag is encountered. Within a loop, cycles among the given strings each time through the loop:: {% for o in some_list %} <tr class="{% cycle 'row1' 'row2' %}"> ... </tr> {% endfor %} Outside of a loop, give the values a unique name the first time you call it, then use that name each sucessive time through:: <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> You can use any number of values, separated by spaces. Commas can also be used to separate values; if a comma is used, the cycle values are interpreted as literal strings. The optional flag "silent" can be used to prevent the cycle declaration from returning any value:: {% cycle 'row1' 'row2' as rowcolors silent %}{# no value here #} {% for o in some_list %} <tr class="{% cycle rowcolors %}">{# first value will be "row1" #} ... </tr> {% endfor %} """ # Note: This returns the exact same node on each {% cycle name %} call; # that is, the node object returned from {% cycle a b c as name %} and the # one returned from {% cycle name %} are the exact same object. This # shouldn't cause problems (heh), but if it does, now you know. # # Ugly hack warning: This stuffs the named template dict into parser so # that names are only unique within each template (as opposed to using # a global variable, which would make cycle names have to be unique across # *all* templates. args = token.split_contents() if len(args) < 2: raise TemplateSyntaxError("'cycle' tag requires at least two arguments") if ',' in args[1]: # Backwards compatibility: {% cycle a,b %} or {% cycle a,b as foo %} # case. args[1:2] = ['"%s"' % arg for arg in args[1].split(",")] if len(args) == 2: # {% cycle foo %} case. name = args[1] if not hasattr(parser, '_namedCycleNodes'): raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name) if not name in parser._namedCycleNodes: raise TemplateSyntaxError("Named cycle '%s' does not exist" % name) return parser._namedCycleNodes[name] as_form = False if len(args) > 4: # {% cycle ... as foo [silent] %} case. if args[-3] == "as": if args[-1] != "silent": raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." 
% args[-1]) as_form = True silent = True args = args[:-1] elif args[-2] == "as": as_form = True silent = False if as_form: name = args[-1] values = [parser.compile_filter(arg) for arg in args[1:-2]] node = CycleNode(values, name, silent=silent) if not hasattr(parser, '_namedCycleNodes'): parser._namedCycleNodes = {} parser._namedCycleNodes[name] = node else: values = [parser.compile_filter(arg) for arg in args[1:]] node = CycleNode(values) return node @register.tag def csrf_token(parser, token): return CsrfTokenNode() @register.tag def debug(parser, token): """ Outputs a whole load of debugging information, including the current context and imported modules. Sample usage:: <pre> {% debug %} </pre> """ return DebugNode() @register.tag('filter') def do_filter(parser, token): """ Filters the contents of the block through variable filters. Filters can also be piped through each other, and they can have arguments -- just like in variable syntax. Sample usage:: {% filter force_escape|lower %} This text will be HTML-escaped, and will appear in lowercase. {% endfilter %} Note that the ``escape`` and ``safe`` filters are not acceptable arguments. Instead, use the ``autoescape`` tag to manage autoescaping for blocks of template code. """ _, rest = token.contents.split(None, 1) filter_expr = parser.compile_filter("var|%s" % (rest)) for func, unused in filter_expr.filters: if getattr(func, '_decorated_function', func).__name__ in ('escape', 'safe'): raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % func.__name__) nodelist = parser.parse(('endfilter',)) parser.delete_first_token() return FilterNode(filter_expr, nodelist) @register.tag def firstof(parser, token): """ Outputs the first variable passed that is not False, without escaping. Outputs nothing if all the passed variables are False. Sample usage:: {% firstof var1 var2 var3 %} This is equivalent to:: {% if var1 %} {{ var1|safe }} {% else %}{% if var2 %} {{ var2|safe }} {% else %}{% if var3 %} {{ var3|safe }} {% endif %}{% endif %}{% endif %} but obviously much cleaner! You can also use a literal string as a fallback value in case all passed variables are False:: {% firstof var1 var2 var3 "fallback value" %} If you want to escape the output, use a filter tag:: {% filter force_escape %} {% firstof var1 var2 var3 "fallback value" %} {% endfilter %} """ bits = token.split_contents()[1:] if len(bits) < 1: raise TemplateSyntaxError("'firstof' statement requires at least one argument") return FirstOfNode([parser.compile_filter(bit) for bit in bits]) @register.tag('for') def do_for(parser, token): """ Loops over each item in an array. For example, to display a list of athletes given ``athlete_list``:: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} </ul> You can loop over a list in reverse by using ``{% for obj in list reversed %}``. 
You can also unpack multiple values from a two-dimensional array::

        {% for key,value in dict.items %}
            {{ key }}: {{ value }}
        {% endfor %}

    The ``for`` tag can take an optional ``{% empty %}`` clause that will
    be displayed if the given array is empty or could not be found::

        <ul>
          {% for athlete in athlete_list %}
            <li>{{ athlete.name }}</li>
          {% empty %}
            <li>Sorry, no athletes in this list.</li>
          {% endfor %}
        </ul>

    The above is equivalent to -- but shorter, cleaner, and possibly faster
    than -- the following::

        <ul>
          {% if athlete_list %}
            {% for athlete in athlete_list %}
              <li>{{ athlete.name }}</li>
            {% endfor %}
          {% else %}
            <li>Sorry, no athletes in this list.</li>
          {% endif %}
        </ul>

    The for loop sets a number of variables available within the loop:

        ==========================  ================================================
        Variable                    Description
        ==========================  ================================================
        ``forloop.counter``         The current iteration of the loop (1-indexed)
        ``forloop.counter0``        The current iteration of the loop (0-indexed)
        ``forloop.revcounter``      The number of iterations from the end of the
                                    loop (1-indexed)
        ``forloop.revcounter0``     The number of iterations from the end of the
                                    loop (0-indexed)
        ``forloop.first``           True if this is the first time through the loop
        ``forloop.last``            True if this is the last time through the loop
        ``forloop.parentloop``      For nested loops, this is the loop "above" the
                                    current one
        ==========================  ================================================

    """
    bits = token.contents.split()
    if len(bits) < 4:
        raise TemplateSyntaxError("'for' statements should have at least four"
                                  " words: %s" % token.contents)

    is_reversed = bits[-1] == 'reversed'
    in_index = is_reversed and -3 or -2
    if bits[in_index] != 'in':
        raise TemplateSyntaxError("'for' statements should use the format"
                                  " 'for x in y': %s" % token.contents)

    loopvars = re.split(r' *, *', ' '.join(bits[1:in_index]))
    for var in loopvars:
        if not var or ' ' in var:
            raise TemplateSyntaxError("'for' tag received an invalid argument:"
                                      " %s" % token.contents)

    sequence = parser.compile_filter(bits[in_index+1])
    nodelist_loop = parser.parse(('empty', 'endfor',))
    token = parser.next_token()
    if token.contents == 'empty':
        nodelist_empty = parser.parse(('endfor',))
        parser.delete_first_token()
    else:
        nodelist_empty = None
    return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty)

def do_ifequal(parser, token, negate):
    bits = list(token.split_contents())
    if len(bits) != 3:
        raise TemplateSyntaxError("%r takes two arguments" % bits[0])
    end_tag = 'end' + bits[0]
    nodelist_true = parser.parse(('else', end_tag))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse((end_tag,))
        parser.delete_first_token()
    else:
        nodelist_false = NodeList()
    val1 = parser.compile_filter(bits[1])
    val2 = parser.compile_filter(bits[2])
    return IfEqualNode(val1, val2, nodelist_true, nodelist_false, negate)

@register.tag
def ifequal(parser, token):
    """
    Outputs the contents of the block if the two arguments equal each other.

    Examples::

        {% ifequal user.id comment.user_id %}
            ...
        {% endifequal %}

        {% ifnotequal user.id comment.user_id %}
            ...
        {% else %}
            ...
        {% endifnotequal %}
    """
    return do_ifequal(parser, token, False)

@register.tag
def ifnotequal(parser, token):
    """
    Outputs the contents of the block if the two arguments are not equal.
    See ifequal.
    """
    return do_ifequal(parser, token, True)

class TemplateLiteral(Literal):
    def __init__(self, value, text):
        self.value = value
        self.text = text # for better error messages

    def display(self):
        return self.text

    def eval(self, context):
        return self.value.resolve(context, ignore_failures=True)

class TemplateIfParser(IfParser):
    error_class = TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        self.template_parser = parser
        super(TemplateIfParser, self).__init__(*args, **kwargs)

    def create_var(self, value):
        return TemplateLiteral(self.template_parser.compile_filter(value), value)

@register.tag('if')
def do_if(parser, token):
    """
    The ``{% if %}`` tag evaluates a variable, and if that variable is "true"
    (i.e., exists, is not empty, and is not a false boolean value), the
    contents of the block are output:

    ::

        {% if athlete_list %}
            Number of athletes: {{ athlete_list|count }}
        {% elif athlete_in_locker_room_list %}
            Athletes should be out of the locker room soon!
        {% else %}
            No athletes.
        {% endif %}

    In the above, if ``athlete_list`` is not empty, the number of athletes
    will be displayed by the ``{{ athlete_list|count }}`` variable.

    As you can see, the ``if`` tag may take one or several ``{% elif %}``
    clauses, as well as an ``{% else %}`` clause that will be displayed if
    all previous conditions fail. These clauses are optional.

    ``if`` tags may use ``or``, ``and`` or ``not`` to test a number of
    variables or to negate a given variable::

        {% if not athlete_list %}
            There are no athletes.
        {% endif %}

        {% if athlete_list or coach_list %}
            There are some athletes or some coaches.
        {% endif %}

        {% if athlete_list and coach_list %}
            Both athletes and coaches are available.
        {% endif %}

        {% if not athlete_list or coach_list %}
            There are no athletes, or there are some coaches.
        {% endif %}

        {% if athlete_list and not coach_list %}
            There are some athletes and absolutely no coaches.
        {% endif %}

    Comparison operators are also available, and the use of filters is also
    allowed, for example::

        {% if articles|length >= 5 %}...{% endif %}

    Arguments and operators _must_ have a space between them, so
    ``{% if 1>2 %}`` is not a valid if tag.

    All supported operators are: ``or``, ``and``, ``in``, ``not in``,
    ``==`` (or ``=``), ``!=``, ``>``, ``>=``, ``<`` and ``<=``.

    Operator precedence follows Python.
    """
    # {% if ... %}
    bits = token.split_contents()[1:]
    condition = TemplateIfParser(parser, bits).parse()
    nodelist = parser.parse(('elif', 'else', 'endif'))
    conditions_nodelists = [(condition, nodelist)]
    token = parser.next_token()

    # {% elif ... %} (repeatable)
    while token.contents.startswith('elif'):
        bits = token.split_contents()[1:]
        condition = TemplateIfParser(parser, bits).parse()
        nodelist = parser.parse(('elif', 'else', 'endif'))
        conditions_nodelists.append((condition, nodelist))
        token = parser.next_token()

    # {% else %} (optional)
    if token.contents == 'else':
        nodelist = parser.parse(('endif',))
        conditions_nodelists.append((None, nodelist))
        token = parser.next_token()

    # {% endif %}
    assert token.contents == 'endif'

    return IfNode(conditions_nodelists)

@register.tag
def ifchanged(parser, token):
    """
    Checks if a value has changed from the last iteration of a loop.

    The ``{% ifchanged %}`` block tag is used within a loop. It has two
    possible uses.

    1. Checks its own rendered contents against its previous state and only
       displays the content if it has changed.
For example, this displays a list of days, only displaying the month if it changes:: <h1>Archive for {{ year }}</h1> {% for date in days %} {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %} <a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a> {% endfor %} 2. If given one or more variables, check whether any variable has changed. For example, the following shows the date every time it changes, while showing the hour if either the hour or the date has changed:: {% for date in days %} {% ifchanged date.date %} {{ date.date }} {% endifchanged %} {% ifchanged date.hour date.date %} {{ date.hour }} {% endifchanged %} {% endfor %} """ bits = token.contents.split() nodelist_true = parser.parse(('else', 'endifchanged')) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse(('endifchanged',)) parser.delete_first_token() else: nodelist_false = NodeList() values = [parser.compile_filter(bit) for bit in bits[1:]] return IfChangedNode(nodelist_true, nodelist_false, *values) @register.tag def ssi(parser, token): """ Outputs the contents of a given file into the page. Like a simple "include" tag, the ``ssi`` tag includes the contents of another file -- which must be specified using an absolute path -- in the current page:: {% ssi /home/html/ljworld.com/includes/right_generic.html %} If the optional "parsed" parameter is given, the contents of the included file are evaluated as template code, with the current context:: {% ssi /home/html/ljworld.com/includes/right_generic.html parsed %} """ import warnings warnings.warn('The syntax for the ssi template tag is changing. Load the `ssi` tag from the `future` tag library to start using the new behavior.', category=DeprecationWarning) bits = token.contents.split() parsed = False if len(bits) not in (2, 3): raise TemplateSyntaxError("'ssi' tag takes one argument: the path to" " the file to be included") if len(bits) == 3: if bits[2] == 'parsed': parsed = True else: raise TemplateSyntaxError("Second (optional) argument to %s tag" " must be 'parsed'" % bits[0]) return SsiNode(bits[1], parsed, legacy_filepath=True) @register.tag def load(parser, token): """ Loads a custom template tag set. For example, to load the template tags in ``django/templatetags/news/photos.py``:: {% load news.photos %} Can also be used to load an individual tag/filter from a library:: {% load byline from news %} """ bits = token.contents.split() if len(bits) >= 4 and bits[-2] == "from": try: taglib = bits[-1] lib = get_library(taglib) except InvalidTemplateLibrary, e: raise TemplateSyntaxError("'%s' is not a valid tag library: %s" % (taglib, e)) else: temp_lib = Library() for name in bits[1:-2]: if name in lib.tags: temp_lib.tags[name] = lib.tags[name] # a name could be a tag *and* a filter, so check for both if name in lib.filters: temp_lib.filters[name] = lib.filters[name] elif name in lib.filters: temp_lib.filters[name] = lib.filters[name] else: raise TemplateSyntaxError("'%s' is not a valid tag or filter in tag library '%s'" % (name, taglib)) parser.add_library(temp_lib) else: for taglib in bits[1:]: # add the library to the parser try: lib = get_library(taglib) parser.add_library(lib) except InvalidTemplateLibrary, e: raise TemplateSyntaxError("'%s' is not a valid tag library: %s" % (taglib, e)) return LoadNode() @register.tag def now(parser, token): """ Displays the date, formatted according to the given string. Uses the same format as PHP's ``date()`` function; see http://php.net/date for all the possible values. 
Sample usage:: It is {% now "jS F Y H:i" %} """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("'now' statement takes one argument") format_string = bits[1][1:-1] return NowNode(format_string) @register.tag def regroup(parser, token): """ Regroups a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``people`` is a list of ``Person`` objects that have ``first_name``, ``last_name``, and ``gender`` attributes, and you'd like to display a list that looks like: * Male: * George Bush * Bill Clinton * Female: * Margaret Thatcher * Colendeeza Rice * Unknown: * Pat Smith The following snippet of template code would accomplish this dubious task:: {% regroup people by gender as grouped %} <ul> {% for group in grouped %} <li>{{ group.grouper }} <ul> {% for item in group.list %} <li>{{ item }}</li> {% endfor %} </ul> {% endfor %} </ul> As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female`` and ``Unknown``, and ``list`` is the list of people with those genders. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of people was not sorted by gender, you'd need to make sure it is sorted before using it, i.e.:: {% regroup people|dictsort:"gender" by gender as grouped %} """ firstbits = token.contents.split(None, 3) if len(firstbits) != 4: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(firstbits[1]) if firstbits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") lastbits_reversed = firstbits[3][::-1].split(None, 2) if lastbits_reversed[1][::-1] != 'as': raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must" " be 'as'") var_name = lastbits_reversed[0][::-1] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + lastbits_reversed[2][::-1]) return RegroupNode(target, expression, var_name) @register.tag def spaceless(parser, token): """ Removes whitespace between HTML tags, including tab and newline characters. Example usage:: {% spaceless %} <p> <a href="foo/">Foo</a> </p> {% endspaceless %} This example would return this HTML:: <p><a href="foo/">Foo</a></p> Only space between *tags* is normalized -- not space between tags and text. In this example, the space around ``Hello`` won't be stripped:: {% spaceless %} <strong> Hello </strong> {% endspaceless %} """ nodelist = parser.parse(('endspaceless',)) parser.delete_first_token() return SpacelessNode(nodelist) @register.tag def templatetag(parser, token): """ Outputs one of the bits used to compose template tags. Since the template system has no concept of "escaping", to display one of the bits used in template tags, you must use the ``{% templatetag %}`` tag. 
The argument tells which template bit to output: ================== ======= Argument Outputs ================== ======= ``openblock`` ``{%`` ``closeblock`` ``%}`` ``openvariable`` ``{{`` ``closevariable`` ``}}`` ``openbrace`` ``{`` ``closebrace`` ``}`` ``opencomment`` ``{#`` ``closecomment`` ``#}`` ================== ======= """ bits = token.contents.split() if len(bits) != 2: raise TemplateSyntaxError("'templatetag' statement takes one argument") tag = bits[1] if tag not in TemplateTagNode.mapping: raise TemplateSyntaxError("Invalid templatetag argument: '%s'." " Must be one of: %s" % (tag, TemplateTagNode.mapping.keys())) return TemplateTagNode(tag) @register.tag def url(parser, token): """ Returns an absolute URL matching given view with its parameters. This is a way to define links that aren't tied to a particular URL configuration:: {% url path.to.some_view arg1 arg2 %} or {% url path.to.some_view name1=value1 name2=value2 %} The first argument is a path to a view. It can be an absolute python path or just ``app_name.view_name`` without the project name if the view is located inside the project. Other arguments are comma-separated values that will be filled in place of positional and keyword arguments in the URL. All arguments for the URL should be present. For example if you have a view ``app_name.client`` taking client's id and the corresponding line in a URLconf looks like this:: ('^client/(\d+)/$', 'app_name.client') and this app's URLconf is included into the project's URLconf under some path:: ('^clients/', include('project_name.app_name.urls')) then in a template you can create a link for a certain client like this:: {% url app_name.client client.id %} The URL will look like ``/clients/client/123/``. """ import warnings warnings.warn('The syntax for the url template tag is changing. Load the `url` tag from the `future` tag library to start using the new behavior.', category=DeprecationWarning) bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument" " (path to a view)" % bits[0]) viewname = bits[1] args = [] kwargs = {} asvar = None bits = bits[2:] if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] # Backwards compatibility: check for the old comma separated format # {% url urlname arg1,arg2 %} # Initial check - that the first space separated bit has a comma in it if bits and ',' in bits[0]: check_old_format = True # In order to *really* be old format, there must be a comma # in *every* space separated bit, except the last. for bit in bits[1:-1]: if ',' not in bit: # No comma in this bit. Either the comma we found # in bit 1 was a false positive (e.g., comma in a string), # or there is a syntax problem with missing commas check_old_format = False break else: # No comma found - must be new format. check_old_format = False if check_old_format: # Confirm that this is old format by trying to parse the first # argument. An exception will be raised if the comma is # unexpected (i.e. outside of a static string). 
match = kwarg_re.match(bits[0]) if match: value = match.groups()[1] try: parser.compile_filter(value) except TemplateSyntaxError: bits = ''.join(bits).split(',') # Now all the bits are parsed into new format, # process them as template vars if len(bits): for bit in bits: match = kwarg_re.match(bit) if not match: raise TemplateSyntaxError("Malformed arguments to url tag") name, value = match.groups() if name: kwargs[name] = parser.compile_filter(value) else: args.append(parser.compile_filter(value)) return URLNode(viewname, args, kwargs, asvar, legacy_view_name=True) @register.tag def widthratio(parser, token): """ For creating bar charts and such, this tag calculates the ratio of a given value to a maximum value, and then applies that ratio to a constant. For example:: <img src='bar.gif' height='10' width='{% widthratio this_value max_value 100 %}' /> Above, if ``this_value`` is 175 and ``max_value`` is 200, the image in the above example will be 88 pixels wide (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88). """ bits = token.contents.split() if len(bits) != 4: raise TemplateSyntaxError("widthratio takes three arguments") tag, this_value_expr, max_value_expr, max_width = bits return WidthRatioNode(parser.compile_filter(this_value_expr), parser.compile_filter(max_value_expr), parser.compile_filter(max_width)) @register.tag('with') def do_with(parser, token): """ Adds one or more values to the context (inside of this block) for caching and easy access. For example:: {% with total=person.some_sql_method %} {{ total }} object{{ total|pluralize }} {% endwith %} Multiple values can be added to the context:: {% with foo=1 bar=2 %} ... {% endwith %} The legacy format of ``{% with person.some_sql_method as total %}`` is still accepted. """ bits = token.split_contents() remaining_bits = bits[1:] extra_context = token_kwargs(remaining_bits, parser, support_legacy=True) if not extra_context: raise TemplateSyntaxError("%r expected at least one variable " "assignment" % bits[0]) if remaining_bits: raise TemplateSyntaxError("%r received an invalid token: %r" % (bits[0], remaining_bits[0])) nodelist = parser.parse(('endwith',)) parser.delete_first_token() return WithNode(None, None, nodelist, extra_context=extra_context)
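# A minimal usage sketch for a few of the tags defined above ({% cycle %},
# {% widthratio %} and {% with %}), not part of the original module.  It
# assumes a Django release contemporary with this file (1.3/1.4 era) and a
# configured settings module; the template string and context values are
# purely illustrative.
from django.conf import settings
from django.template import Template, Context

if not settings.configured:
    settings.configure()  # bare-bones setup so Template() works standalone

t = Template(
    "{% for o in items %}{% cycle 'row1' 'row2' %} {% endfor %}"
    "{% widthratio value max_value 100 %} "
    "{% with total=items|length %}items: {{ total }}{% endwith %}"
)
# The cycle node alternates per loop pass; widthratio computes
# round(175 / 200 * 100) = 88, as in its docstring example.
print(t.render(Context({'items': [1, 2, 3], 'value': 175, 'max_value': 200})))
# -> "row1 row2 row1 88 items: 3"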
mezz64/home-assistant
refs/heads/dev
homeassistant/components/zha/core/channels/manufacturerspecific.py
14
"""Manufacturer specific channels module for Zigbee Home Automation.""" from homeassistant.core import callback from .. import registries from ..const import ( ATTR_ATTRIBUTE_ID, ATTR_ATTRIBUTE_NAME, ATTR_VALUE, REPORT_CONFIG_ASAP, REPORT_CONFIG_MAX_INT, REPORT_CONFIG_MIN_INT, SIGNAL_ATTR_UPDATED, UNKNOWN, ) from .base import ZigbeeChannel @registries.ZIGBEE_CHANNEL_REGISTRY.register(registries.SMARTTHINGS_HUMIDITY_CLUSTER) class SmartThingsHumidity(ZigbeeChannel): """Smart Things Humidity channel.""" REPORT_CONFIG = [ { "attr": "measured_value", "config": (REPORT_CONFIG_MIN_INT, REPORT_CONFIG_MAX_INT, 50), } ] @registries.CHANNEL_ONLY_CLUSTERS.register(0xFD00) @registries.ZIGBEE_CHANNEL_REGISTRY.register(0xFD00) class OsramButton(ZigbeeChannel): """Osram button channel.""" REPORT_CONFIG = [] @registries.CHANNEL_ONLY_CLUSTERS.register(registries.PHILLIPS_REMOTE_CLUSTER) @registries.ZIGBEE_CHANNEL_REGISTRY.register(registries.PHILLIPS_REMOTE_CLUSTER) class PhillipsRemote(ZigbeeChannel): """Phillips remote channel.""" REPORT_CONFIG = [] @registries.CHANNEL_ONLY_CLUSTERS.register(0xFCC0) @registries.ZIGBEE_CHANNEL_REGISTRY.register(0xFCC0) class OppleRemote(ZigbeeChannel): """Opple button channel.""" REPORT_CONFIG = [] @registries.ZIGBEE_CHANNEL_REGISTRY.register( registries.SMARTTHINGS_ACCELERATION_CLUSTER ) class SmartThingsAcceleration(ZigbeeChannel): """Smart Things Acceleration channel.""" REPORT_CONFIG = [ {"attr": "acceleration", "config": REPORT_CONFIG_ASAP}, {"attr": "x_axis", "config": REPORT_CONFIG_ASAP}, {"attr": "y_axis", "config": REPORT_CONFIG_ASAP}, {"attr": "z_axis", "config": REPORT_CONFIG_ASAP}, ] @callback def attribute_updated(self, attrid, value): """Handle attribute updates on this cluster.""" if attrid == self.value_attribute: self.async_send_signal( f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, self._cluster.attributes.get(attrid, [UNKNOWN])[0], value, ) return self.zha_send_event( SIGNAL_ATTR_UPDATED, { ATTR_ATTRIBUTE_ID: attrid, ATTR_ATTRIBUTE_NAME: self._cluster.attributes.get(attrid, [UNKNOWN])[0], ATTR_VALUE: value, }, )
draugiskisprendimai/odoo
refs/heads/8.0
addons/resource/faces/resource.py
433
#@+leo-ver=4 #@+node:@file resource.py #@@language python #@<< Copyright >> #@+node:<< Copyright >> ############################################################################ # Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH # mreithinger@web.de # # This file is part of faces. # # faces is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # faces is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ############################################################################ #@-node:<< Copyright >> #@nl #@<< Imports >> #@+node:<< Imports >> import pcalendar import datetime import utils import string import bisect import plocale #@-node:<< Imports >> #@nl _is_source = True _to_datetime = pcalendar.to_datetime _ = plocale.get_gettext() #@+others #@+node:_isattrib #@+doc #@nonl # is used to find snapshot attributes #@-doc #@@code def _isattrib(obj, a): return a[0] != "_" \ and not callable(getattr(obj, a)) \ and not a.endswith("_members") \ and a not in ("name") #@-node:_isattrib #@+node:class ResourceCalendar class ResourceCalendar(object): """ The resource calendar saves the load time of a resource. Is ia sequence of time intervals of loads. An example of such a sequence is: [ (datetime.min, 0), (2006/1/1, 1.0), (2006/1/10, 0.5), (2006/1/15, 0) ] That means the resource: is free till january the first 2006 is fully booked from january the first to january 10th is half booked from january 10th to january 15th is free since january 15th """ #@ @+others #@+node:__init__ def __init__(self, src=None): if src: self.bookings = list(src.bookings) else: self.bookings = [ (datetime.datetime.min, 0) ] #@-node:__init__ #@+node:__str__ def __str__(self): return str(self.bookings) #@-node:__str__ #@+node:__repr__ def __repr__(self): return "<ResourceCalendar %s>" % (str(self)) #@-node:__repr__ #@+node:add_load def add_load(self, start, end, load): start = _to_datetime(start) end = _to_datetime(end) bookings = self.bookings # the load will be converted in an integer to avoid # rouning problems load = int(load * 10000) start_item = (start, 0) start_pos = bisect.bisect_left(bookings, start_item) left_load = 0 left_load = bookings[start_pos - 1][1] if start_pos < len(bookings) and bookings[start_pos][0] == start: prev_load = bookings[start_pos][1] if prev_load + load == left_load: del bookings[start_pos] else: bookings[start_pos] = (start, prev_load + load) start_pos += 1 else: bookings.insert(start_pos, (start, load + left_load)) start_pos += 1 item = (datetime.datetime.min, 0) for i in range(start_pos, len(bookings)): end_pos = i item = bookings[i] if item[0] >= end: break bookings[i] = (item[0], item[1] + load) else: end_pos = len(bookings) left_load = bookings[end_pos - 1][1] if item[0] == end: if item[1] == left_load: del bookings[end_pos] else: bookings.insert(end_pos, (end, left_load - load)) #@-node:add_load #@+node:end_of_booking_interval def end_of_booking_interval(self, date): date = _to_datetime(date) bookings = self.bookings 
date_item = (date, 999999) date_pos = bisect.bisect_left(bookings, date_item) - 1 next_date = datetime.datetime.max load = 0 try: book_item = bookings[date_pos] load = bookings[date_pos][1] / 10000.0 next_date = bookings[date_pos + 1][0] except: pass return next_date, load #@-node:end_of_booking_interval #@+node:find_free_time def find_free_time(self, start, length, load, max_load): bookings = self.bookings if isinstance(start, datetime.datetime): adjust_date = _to_datetime else: adjust_date = start.calendar.EndDate start = _to_datetime(start) load = int(load * 10000) max_load = int(max_load * 10000) lb = len(bookings) def next_possible(index): while index < lb: sd, lo = bookings[index] if lo + load <= max_load: break index += 1 sd = adjust_date(max(start, sd)) ed = sd + length end = _to_datetime(ed) index += 1 while index < lb: date, lo = bookings[index] if date >= end: #I found a good start date return None, sd if lo + load > max_load: return index + 1, None index += 1 return None, sd start_item = (start, 1000000) i = bisect.bisect_left(bookings, start_item) - 1 next_start = None while not next_start and i < lb: i, next_start = next_possible(i) assert(next_start is not None) return next_start #@-node:find_free_time #@+node:get_bookings def get_bookings(self, start, end): start = _to_datetime(start) end = _to_datetime(end) bookings = self.bookings start_item = (start, 0) start_pos = bisect.bisect_left(bookings, start_item) if start_pos >= len(bookings) or bookings[start_pos][0] > start: start_pos -= 1 end_item = (end, 0) end_pos = bisect.bisect_left(bookings, end_item) return start_pos, end_pos, bookings #@-node:get_bookings #@+node:get_load def get_load(self, date): date = _to_datetime(date) bookings = self.bookings item = (date, 100000) pos = bisect.bisect_left(bookings, item) - 1 return bookings[pos][1] / 10000.0 #@-node:get_load #@-others #@-node:class ResourceCalendar #@+node:class _ResourceBase class _ResourceBase(object): pass #@-node:class _ResourceBase #@+node:class _MetaResource class _MetaResource(type): doc_template = """ A resource class. The resources default attributes can be changed when the class ist instanciated, i.e. %(name)s(max_load=2.0) @var max_load: Specify the maximal allowed load sum of all simultaneously allocated tasks of a resource. A ME{max_load} of 1.0 (default) means the resource may be fully allocated. A ME{max_load} of 1.3 means the resource may be allocated with 30%% overtime. @var title: Specifies an alternative more descriptive name for the task. @var efficiency: The efficiency of a resource can be used for two purposes. First you can use it as a crude way to model a team. A team of 5 people should have an efficiency of 5.0. Keep in mind that you cannot track the member of the team individually if you use this feature. The other use is to model performance variations between your resources. @var vacation: Specifies the vacation of the resource. This attribute is specified as a list of date literals or date literal intervals. Be aware that the end of an interval is excluded, i.e. it is the first working date. 
""" #@ @+others #@+node:__init__ def __init__(self, name, bases, dict_): super(_MetaResource, self).__init__(name, bases, dict_) self.name = name self.title = dict_.get("title", name) self._calendar = { None: ResourceCalendar() } self._tasks = { } self.__set_vacation() self.__add_resource(bases[0]) self.__doc__ = dict_.get("__doc__", self.doc_template) % locals() #@-node:__init__ #@+node:__or__ def __or__(self, other): return self().__or__(other) #@-node:__or__ #@+node:__and__ def __and__(self, other): return self().__and__(other) #@-node:__and__ #@+node:__cmp__ def __cmp__(self, other): return cmp(self.name, getattr(other, "name", None)) #@-node:__cmp__ #@+node:__repr__ def __repr__(self): return "<Resource %s>" % self.name #@-node:__repr__ #@+node:__str__ def __str__(self): return repr(self) #@-node:__str__ #@+node:__set_vacation def __set_vacation(self): vacation = self.vacation if isinstance(vacation, (tuple, list)): for v in vacation: if isinstance(v, (tuple, list)): self.add_vacation(v[0], v[1]) else: self.add_vacation(v) else: self.add_vacation(vacation) #@-node:__set_vacation #@+node:__add_resource def __add_resource(self, base): if issubclass(base, _ResourceBase): members = getattr(base, base.__name__ + "_members", []) members.append(self) setattr(base, base.__name__ + "_members", members) #@-node:__add_resource #@+node:get_members def get_members(self): return getattr(self, self.__name__ + "_members", []) #@-node:get_members #@+node:add_vacation def add_vacation(self, start, end=None): start_date = _to_datetime(start) if not end: end_date = start_date.replace(hour=23, minute=59) else: end_date = _to_datetime(end) for cal in self._calendar.itervalues(): cal.add_load(start_date, end_date, 1) tp = Booking() tp.start = start_date tp.end = end_date tp.book_start = start_date tp.book_end = end_date tp.work_time = end_date - start_date tp.load = 1.0 tp.name = tp.title = _("(vacation)") tp._id = "" self._tasks.setdefault("", []).append(tp) #@-node:add_vacation #@+node:calendar def calendar(self, scenario): try: return self._calendar[scenario] except KeyError: cal = self._calendar[scenario] = ResourceCalendar(self._calendar[None]) return cal #@-node:calendar #@-others #@-node:class _MetaResource #@+node:make_team def make_team(resource): members = resource.get_members() if not members: return resource result = make_team(members[0]) for r in members[1:]: result = result & make_team(r) return result #@-node:make_team #@+node:class Booking class Booking(object): """ A booking unit for a task. 
""" #@ << declarations >> #@+node:<< declarations >> book_start = datetime.datetime.min book_end = datetime.datetime.max actual = False _id = "" #@-node:<< declarations >> #@nl #@ @+others #@+node:__init__ def __init__(self, task=None): self.__task = task #@-node:__init__ #@+node:__cmp__ def __cmp__(self, other): return cmp(self._id, other._id) #@-node:__cmp__ #@+node:path def path(self): first_dot = self._id.find(".") return "root" + self._id[first_dot:] path = property(path) #@nonl #@-node:path #@+node:_idendity_ def _idendity_(self): return self._id #@-node:_idendity_ #@+node:__getattr__ def __getattr__(self, name): if self.__task: return getattr(self.__task, name) raise AttributeError("'%s' is not a valid attribute" % (name)) #@-node:__getattr__ #@-others #@-node:class Booking #@+node:class ResourceList class ResourceList(list): #@ @+others #@+node:__init__ def __init__(self, *args): if args: self.extend(args) #@-node:__init__ #@-others #@-node:class ResourceList #@+node:class Resource class Resource(_ResourceBase): #@ << declarations >> #@+node:<< declarations >> __metaclass__ = _MetaResource __attrib_completions__ = {\ "max_load": 'max_load = ', "title": 'title = "|"', "efficiency": 'efficiency = ', "vacation": 'vacation = [("|2002-02-01", "2002-02-05")]' } __type_image__ = "resource16" max_load = None # the maximum sum load for all task vacation = () efficiency = 1.0 #@-node:<< declarations >> #@nl #@ @+others #@+node:__init__ def __init__(self, **kwargs): for k, v in kwargs.iteritems(): setattr(self, k, v) #@-node:__init__ #@+node:_idendity_ def _idendity_(cls): return "resource:" + cls.__name__ _idendity_ = classmethod(_idendity_) #@-node:_idendity_ #@+node:__repr__ def __repr__(self): return "<Resource %s>" % self.__class__.__name__ #@-node:__repr__ #@+node:__str__ def __str__(self): return repr(self) #@-node:__str__ #@+node:__call__ def __call__(self): return self #@-node:__call__ #@+node:__hash__ def __hash__(self): return hash(self.__class__) #@-node:__hash__ #@+node:__cmp__ def __cmp__(self, other): return cmp(self.name, other.name) #@-node:__cmp__ #@+node:__or__ def __or__(self, other): if type(other) is _MetaResource: other = other() result = Resource() result._subresource = _OrResourceGroup(self, other) return result #@-node:__or__ #@+node:__and__ def __and__(self, other): if type(other) is _MetaResource: other = other() result = Resource() result._subresource = _AndResourceGroup(self, other) return result #@-node:__and__ #@+node:_permutation_count def _permutation_count(self): if hasattr(self, "_subresource"): return self._subresource._permutation_count() return 1 #@-node:_permutation_count #@+node:_get_resources def _get_resources(self, state): if hasattr(self, "_subresource"): result = self._subresource._get_resources(state) if self.name != "Resource": result.name = self.name if self.title != "Resource": result.title = self.title return result result = ResourceList(self) return result #@-node:_get_resources #@+node:all_members def all_members(self): if hasattr(self, "_subresource"): return self._subresource.all_members() return [ self.__class__ ] #@-node:all_members #@+node:unbook_tasks_of_project def unbook_tasks_of_project(cls, project_id, scenario): try: task_list = cls._tasks[scenario] except KeyError: return add_load = cls.calendar(scenario).add_load for task_id, bookings in task_list.items(): if task_id.startswith(project_id): for item in bookings: add_load(item.book_start, item.book_end, -item.load) del task_list[task_id] if not task_list: del 
cls._tasks[scenario] unbook_tasks_of_project = classmethod(unbook_tasks_of_project) #@-node:unbook_tasks_of_project #@+node:unbook_task def unbook_task(cls, task): identdity = task._idendity_() scenario = task.scenario try: task_list = cls._tasks[scenario] bookings = task_list[identdity] except KeyError: return add_load = cls.calendar(scenario).add_load for b in bookings: add_load(b.book_start, b.book_end, -b.load) del task_list[identdity] if not task_list: del cls._tasks[scenario] unbook_task = classmethod(unbook_task) #@-node:unbook_task #@+node:correct_bookings def correct_bookings(cls, task): #correct the booking data with the actual task data try: tasks = cls._tasks[task.scenario][task._idendity_()] except KeyError: return for t in tasks: t.start = task.start.to_datetime() t.end = task.end.to_datetime() correct_bookings = classmethod(correct_bookings) #@-node:correct_bookings #@+node:book_task def book_task(cls, task, start, end, load, work_time, actual): if not work_time: return start = _to_datetime(start) end = _to_datetime(end) identdity = task._idendity_() task_list = cls._tasks.setdefault(task.scenario, {}) bookings = task_list.setdefault(identdity, []) add_load = cls.calendar(task.scenario).add_load tb = Booking(task) tb.book_start = start tb.book_end = end tb._id = identdity tb.load = load tb.start = _to_datetime(task.start) tb.end = _to_datetime(task.end) tb.title = task.title tb.name = task.name tb.work_time = int(work_time) tb.actual = actual bookings.append(tb) result = add_load(start, end, load) return result book_task = classmethod(book_task) #@-node:book_task #@+node:length_of def length_of(cls, task): cal = task.root.calendar bookings = cls.get_bookings(task) return sum(map(lambda b: task._to_delta(b.work_time).round(), bookings)) length_of = classmethod(length_of) #@-node:length_of #@+node:done_of def done_of(self, task): cal = task.root.calendar now = cal.now bookings = self.get_bookings(task) if task.__dict__.has_key("effort"): efficiency = self.efficiency * task.efficiency else: efficiency = 1 def book_done(booking): if booking.book_start >= now: return 0 factor = 1 if booking.book_end > now: start = task._to_start(booking.book_start) end = task._to_end(booking.book_end) cnow = task._to_start(now) factor = float(cnow - start) / ((end - start) or 1) return factor * booking.work_time * efficiency return task._to_delta(sum(map(book_done, bookings))) #@-node:done_of #@+node:todo_of def todo_of(self, task): cal = task.root.calendar now = cal.now bookings = self.get_bookings(task) if task.__dict__.has_key("effort"): efficiency = self.efficiency * task.efficiency else: efficiency = 1 def book_todo(booking): if booking.book_end <= now: return 0 factor = 1 if booking.book_start < now: start = task._to_start(booking.book_start) end = task._to_end(booking.book_end) cnow = task._to_start(now) factor = float(end - cnow) / ((end - start) or 1) return factor * booking.work_time * efficiency return task._to_delta(sum(map(book_todo, bookings))) #@-node:todo_of #@+node:get_bookings def get_bookings(cls, task): return cls._tasks.get(task.scenario, {}).get(task._idendity_(), ()) get_bookings = classmethod(get_bookings) #@-node:get_bookings #@+node:get_bookings_at def get_bookings_at(cls, start, end, scenario): result = [] try: items = cls._tasks[scenario].iteritems() except KeyError: return () for task_id, bookings in items: result += [ booking for booking in bookings if booking.book_start < end and booking.book_end > start ] vacations = cls._tasks.get("", ()) result += [ booking for 
booking in vacations if booking.book_start < end and booking.book_end > start ] return result get_bookings_at = classmethod(get_bookings_at) #@-node:get_bookings_at #@+node:find_free_time def find_free_time(cls, start, length, load, max_load, scenario): return cls.calendar(scenario).find_free_time(start, length, load, max_load) find_free_time = classmethod(find_free_time) #@-node:find_free_time #@+node:get_load def get_load(cls, date, scenario): return cls.calendar(scenario).get_load(date) get_load = classmethod(get_load) #@-node:get_load #@+node:end_of_booking_interval def end_of_booking_interval(cls, date, task): return cls.calendar(task.scenario).end_of_booking_interval(date) end_of_booking_interval = classmethod(end_of_booking_interval) #@-node:end_of_booking_interval #@+node:snapshot def snapshot(self): from task import _as_string def isattrib(a): if a == "max_load" and self.max_load is None: return False if a in ("name", "title", "vacation"): return False return _isattrib(self, a) attribs = filter(isattrib, dir(self)) attribs = map(lambda a: "%s=%s" % (a, _as_string(getattr(self, a))), attribs) return self.name + "(%s)" % ", ".join(attribs) #@-node:snapshot #@-others #@-node:class Resource #@+node:class _ResourceGroup class _ResourceGroup(object): #@ @+others #@+node:__init__ def __init__(self, *args): self.resources = [] for a in args: self.__append(a) #@-node:__init__ #@+node:all_members def all_members(self): group = reduce(lambda a, b: a + b.all_members(), self.resources, []) group = map(lambda r: (r, True), group) group = dict(group) group = group.keys() return group #@-node:all_members #@+node:_permutation_count def _permutation_count(self): abstract #@-node:_permutation_count #@+node:_refactor def _refactor(self, arg): pass #@-node:_refactor #@+node:__append def __append(self, arg): if isinstance(arg, self.__class__): self.resources += arg.resources for r in arg.resources: self._refactor(r) return elif isinstance(arg, Resource): subresources = getattr(arg, "_subresource", None) if subresources: self.__append(subresources) return else: self.resources.append(arg) else: assert(isinstance(arg, _ResourceGroup)) self.resources.append(arg) self._refactor(arg) #@-node:__append #@+node:__str__ def __str__(self): op = lower(self.__class__.__name__[0:-13]) return "(" + \ string.join([str(r) for r in self.resources], " " + op + " ") + \ ")" #@-node:__str__ #@-others #@-node:class _ResourceGroup #@+node:class _OrResourceGroup class _OrResourceGroup(_ResourceGroup): #@ @+others #@+node:_get_resources def _get_resources(self, state): for r in self.resources: c = r._permutation_count() if c <= state: state -= c else: return r._get_resources(state) assert(0) #@-node:_get_resources #@+node:_permutation_count def _permutation_count(self): return sum([ r._permutation_count() for r in self.resources]) #@-node:_permutation_count #@-others #@-node:class _OrResourceGroup #@+node:class _AndResourceGroup class _AndResourceGroup(_ResourceGroup): #@ @+others #@+node:__init__ def __init__(self, *args): self.factors = [ 1 ] _ResourceGroup.__init__(self, *args) #@-node:__init__ #@+node:_refactor def _refactor(self, arg): count = arg._permutation_count() self.factors = [ count * f for f in self.factors ] self.factors.append(1) #@-node:_refactor #@+node:_permutation_count #print "AndResourceGroup", count, arg, self.factors def _permutation_count(self): return self.factors[0] #@-node:_permutation_count #@+node:_get_resources def _get_resources(self, state): """delivers None when there are duplicate 
resources""" result = [] for i in range(1, len(self.factors)): f = self.factors[i] substate = state / f state %= f result.append(self.resources[i - 1]._get_resources(substate)) result = ResourceList(*list(utils.flatten(result))) dupl_test = { } for r in result: if dupl_test.has_key(r): return None else: dupl_test[r] = 1 return result #@-node:_get_resources #@+node:_has_duplicates def _has_duplicates(self, state): resources = self._get_resources(state) tmp = { } for r in resources: if tmp.has_key(r): return True tmp[r] = 1 return False #@-node:_has_duplicates #@-others #@-node:class _AndResourceGroup #@-others #@-node:@file resource.py #@-leo # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
p0psicles/SickGear
refs/heads/master
lib/sqlalchemy/testing/runner.py
79
#!/usr/bin/env python # testing/runner.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Nose test runner module. This script is a front-end to "nosetests" which installs SQLAlchemy's testing plugin into the local environment. The script is intended to be used by third-party dialects and extensions that run within SQLAlchemy's testing framework. The runner can be invoked via:: python -m sqlalchemy.testing.runner The script is then essentially the same as the "nosetests" script, including all of the usual Nose options. The test environment requires that a setup.cfg is locally present including various required options. Note that when using this runner, Nose's "coverage" plugin will not be able to provide coverage for SQLAlchemy itself, since SQLAlchemy is imported into sys.modules before coverage is started. The special script sqla_nose.py is provided as a top-level script which loads the plugin in a special (somewhat hacky) way so that coverage against SQLAlchemy itself is possible. """ from sqlalchemy.testing.plugin.noseplugin import NoseSQLAlchemy import nose def main(): nose.main(addplugins=[NoseSQLAlchemy()]) def setup_py_test(): """Runner to use for the 'test_suite' entry of your setup.py. Prevents any name clash shenanigans from the command line argument "test" that the "setup.py test" command sends to nose. """ nose.main(addplugins=[NoseSQLAlchemy()], argv=['runner'])
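# A sketch of the setup.py wiring that setup_py_test() above is meant for,
# not part of the original module.  The package name and contents below are
# illustrative only; the 'test_suite' entry is the usage the docstring
# prescribes.
from setuptools import setup

setup(
    name='sqlalchemy-exampledialect',   # hypothetical third-party dialect
    version='0.1',
    packages=['exampledialect'],
    test_suite='sqlalchemy.testing.runner.setup_py_test',
)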
jptomo/rpython-lang-scheme
refs/heads/master
rpython/jit/metainterp/counter.py
2
from rpython.rlib.rarithmetic import r_singlefloat, r_uint
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo


r_uint32 = rffi.r_uint
assert r_uint32.BITS == 32
UINT32MAX = 2 ** 32 - 1

# keep in sync with the C code in pypy__decay_jit_counters below
ENTRY = lltype.Struct('timetable_entry',
                      ('times', lltype.FixedSizeArray(rffi.FLOAT, 5)),
                      ('subhashes', lltype.FixedSizeArray(rffi.USHORT, 5)))


class JitCounter:
    """A process translated with the JIT contains one prebuilt instance
    of this class. It is used for three things:

    * It maps greenkey hashes to counters, to know when we have seen this
      greenkey enough to reach the 'threshold' or 'function_threshold'
      parameters. This is done in a lossy way by a fixed-size 'timetable'.

    * It handles the counters on the failing guards, for 'trace_eagerness'.
      This is done in the same 'timetable'.

    * It records the JitCell objects that are created when we compile
      a loop, in a non-lossy dictionary-like structure. This is done in
      the 'celltable'.

    The 'timetable' is a table of DEFAULT_SIZE entries, each of which
    contains 5 entries. From a hash value, we use the index number
    '_get_index(hash)', and then we look in all five entries for a
    matching '_get_subhash(hash)'. The five entries are roughly kept
    sorted by decreasing recorded time. The hash value itself should
    be computed accordingly: we only use bits 21:32 for _get_index
    and bits 0:16 for _get_subhash. (This organization is "probably
    good" to get not-too-random behavior; another motivation for it
    was for the STM branch, to avoid pointless conflicts between
    threads.)

    The time value stored in the timetable is a (short-precision)
    floating-point number. The idea is that a value of 0.0 means
    absent, and values go up to the maximum of 1.0.

    'compute_threshold(threshold)' returns basically the fraction
    1.0/threshold, corresponding to the 'increment' value for the
    following APIs.

    'tick(hash, increment)' adds 'increment' to the time value stored
    with the 'hash'. Remember that only bits 0:16,21:32 of the hash
    are used; in case of collision between two hashes, they will grow
    twice as fast, because each tick() call will contribute to the
    colliding time value.

    'fetch_next_hash()' returns a "random" hash value suitable for
    using in tick() later. Used when compiling guards; when the
    guard actually fails, we'll tick() the guard's stored random
    hash.

    'reset(hash)', 'change_current_fraction(hash, new_time_value)'
    change the time value associated with a hash. The former resets
    it to zero, and the latter changes it to the given value (which
    should be a value close to 1.0).

    'set_decay(decay)', 'decay_all_counters()' are used to globally
    reduce all the stored time values. They all get multiplied by
    a fraction close to (but smaller than) 1.0, computed from the
    'decay' parameter.

    'install_new_cell(hash, newcell)' adds the new JitCell to the
    celltable, at the index given by 'hash' (bits 21:32). Unlike the
    timetable, the celltable stores a linked list of JitCells for
    every entry, and so it is not lossy.

    'lookup_chain(hash)' returns the first JitCell at 'hash'. You can
    then walk the chain by following the '.next' attributes until you
    reach None.

    'cleanup_chain(hash)' resets the timetable's 'hash' entry and
    cleans up the celltable at 'hash'. It removes those JitCells
    for which 'cell.should_remove_jitcell()' returns True.
""" DEFAULT_SIZE = 2048 def __init__(self, size=DEFAULT_SIZE, translator=None): "NOT_RPYTHON" self.size = size self.shift = 16 while (UINT32MAX >> self.shift) != size - 1: self.shift += 1 assert self.shift < 999, "size is not a power of two <= 2**16" # # The table of timings. This is a 5-ways associative cache. # We index into it using a number between 0 and (size - 1), # and we're getting a 32-bytes-long entry; then this entry # contains 5 possible ways, each occupying 6 bytes: 4 bytes # for a float, and the 2 lowest bytes from the original hash. self.timetable = lltype.malloc(rffi.CArray(ENTRY), self.size, flavor='raw', zero=True, track_allocation=False) self._nexthash = r_uint(0) # # The table of JitCell entries, recording already-compiled loops self.celltable = [None] * size # if translator is not None: class Glob: step = 0 glob = Glob() def invoke_after_minor_collection(): # After 32 minor collections, we call decay_all_counters(). # The "--jit decay=N" option measures the amount the # counters are then reduced by. glob.step += 1 if glob.step == 32: glob.step = 0 self.decay_all_counters() if not hasattr(translator, '_jit2gc'): translator._jit2gc = {} translator._jit2gc['invoke_after_minor_collection'] = ( invoke_after_minor_collection) def compute_threshold(self, threshold): """Return the 'increment' value corresponding to the given number.""" if threshold <= 0: return 0.0 # no increment, never reach 1.0 return 1.0 / (threshold - 0.001) def _get_index(self, hash): """Return the index (< self.size) from a hash. This truncates the hash to 32 bits, and then keep the *highest* remaining bits. Be sure that hash is computed correctly, by multiplying with a large odd number or by fetch_next_hash().""" hash32 = r_uint(r_uint32(hash)) # mask off the bits higher than 32 index = hash32 >> self.shift # shift, resulting in a value < size return index # return the result as a r_uint _get_index._always_inline_ = True @staticmethod def _get_subhash(hash): return hash & 65535 def fetch_next_hash(self): result = self._nexthash # note: all three "1" bits in the following constant are needed # to make test_counter.test_fetch_next_index pass. The first # is to increment the "subhash" (lower 16 bits of the hash). # The second is to increment the "index" portion of the hash. # The third is so that after 65536 passes, the "index" is # incremented by one more (by overflow), so that the next # 65536 passes don't end up with the same subhashes. 
self._nexthash = result + r_uint(1 | (1 << self.shift) | (1 << (self.shift - 16))) return result def _swap(self, p_entry, n): if float(p_entry.times[n]) > float(p_entry.times[n + 1]): return n + 1 else: x = p_entry.times[n] p_entry.times[n] = p_entry.times[n + 1] p_entry.times[n + 1] = x x = p_entry.subhashes[n] p_entry.subhashes[n] = p_entry.subhashes[n + 1] p_entry.subhashes[n + 1] = x return n _swap._always_inline_ = True def _tick_slowpath(self, p_entry, subhash): if p_entry.subhashes[1] == subhash: n = self._swap(p_entry, 0) elif p_entry.subhashes[2] == subhash: n = self._swap(p_entry, 1) elif p_entry.subhashes[3] == subhash: n = self._swap(p_entry, 2) elif p_entry.subhashes[4] == subhash: n = self._swap(p_entry, 3) else: n = 4 while n > 0 and float(p_entry.times[n - 1]) == 0.0: n -= 1 p_entry.subhashes[n] = rffi.cast(rffi.USHORT, subhash) p_entry.times[n] = r_singlefloat(0.0) return n def tick(self, hash, increment): p_entry = self.timetable[self._get_index(hash)] subhash = self._get_subhash(hash) # if p_entry.subhashes[0] == subhash: n = 0 else: n = self._tick_slowpath(p_entry, subhash) # counter = float(p_entry.times[n]) + increment if counter < 1.0: p_entry.times[n] = r_singlefloat(counter) return False else: # when the bound is reached, we immediately reset the value to 0.0 self.reset(hash) return True tick._always_inline_ = True def change_current_fraction(self, hash, new_fraction): """Change the value stored for 'hash' to be the given 'new_fraction', which should be a float equal to or slightly lower than 1.0. """ p_entry = self.timetable[self._get_index(hash)] subhash = self._get_subhash(hash) # find in 'n' the index that will be overwritten: the first within # range(5) that contains either the right subhash, or a null time # (or, if there isn't any, then just n == 4 will do). n = 0 while n < 4 and (p_entry.subhashes[n] != subhash and float(p_entry.times[n]) != 0.0): n += 1 # move one step to the right all elements [n - 1, n - 2, ..., 0], # (this overwrites the old item at index 'n') while n > 0: n -= 1 p_entry.subhashes[n + 1] = p_entry.subhashes[n] p_entry.times[n + 1] = p_entry.times[n] # insert the new hash at index 0. This is a good approximation, # because change_current_fraction() should be used for # new_fraction == value close to 1.0. p_entry.subhashes[0] = rffi.cast(rffi.USHORT, subhash) p_entry.times[0] = r_singlefloat(new_fraction) def reset(self, hash): p_entry = self.timetable[self._get_index(hash)] subhash = self._get_subhash(hash) for i in range(5): if p_entry.subhashes[i] == subhash: p_entry.times[i] = r_singlefloat(0.0) def lookup_chain(self, hash): return self.celltable[self._get_index(hash)] def cleanup_chain(self, hash): self.reset(hash) self.install_new_cell(hash, None) def install_new_cell(self, hash, newcell): index = self._get_index(hash) cell = self.celltable[index] keep = newcell while cell is not None: nextcell = cell.next if not cell.should_remove_jitcell(): cell.next = keep keep = cell cell = nextcell self.celltable[index] = keep def set_decay(self, decay): """Set the decay, from 0 (none) to 1000 (max).""" if decay < 0: decay = 0 elif decay > 1000: decay = 1000 self.decay_by_mult = 1.0 - (decay * 0.001) def decay_all_counters(self): # Called during a minor collection by the GC, to gradually decay # counters that didn't reach their maximum. Thus if a counter # is incremented very slowly, it will never reach the maximum. # This avoids altogether the JIT compilation of rare paths. 
# We also call this function when any maximum bound is reached,
        # to avoid sudden bursts of JIT-compilation (the next one will
        # not reach the maximum bound immediately after).  This is
        # important in corner cases where we would suddenly compile more
        # than one loop because all counters reach the bound at the same
        # time, but where compiling all but the first one is pointless.
        p = rffi.cast(rffi.CCHARP, self.timetable)
        pypy__decay_jit_counters(p, self.decay_by_mult, self.size)


# this function is written directly in C; gcc will optimize it using SSE
eci = ExternalCompilationInfo(post_include_bits=["""
static void pypy__decay_jit_counters(char *data, double f1, long size) {
    struct rpy_jitcnt { float times[5]; unsigned short subhashes[5]; };
    struct rpy_jitcnt *p = (struct rpy_jitcnt *)data;
    float f = (float)f1;
    long i;
    for (i=0; i<size; i++) {
        p->times[0] *= f;
        p->times[1] *= f;
        p->times[2] *= f;
        p->times[3] *= f;
        p->times[4] *= f;
        ++p;
    }
}
"""])

pypy__decay_jit_counters = rffi.llexternal(
    "pypy__decay_jit_counters", [rffi.CCHARP, lltype.Float, lltype.Signed],
    lltype.Void, compilation_info=eci, _nowrapper=True, sandboxsafe=True)


# ____________________________________________________________
#
# A non-RPython version that avoids issues with rare random collisions,
# which make all tests brittle

class DeterministicJitCounter(JitCounter):
    def __init__(self):
        from collections import defaultdict
        JitCounter.__init__(self, size=8)
        def make_null_entry():
            return lltype.malloc(ENTRY, immortal=True, zero=True)
        self.timetable = defaultdict(make_null_entry)
        self.celltable = defaultdict(lambda: None)

    def _get_index(self, hash):
        "NOT_RPYTHON"
        return hash

    def decay_all_counters(self):
        "NOT_RPYTHON"
        pass

    def _clear_all(self):
        self.timetable.clear()
        self.celltable.clear()
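# A minimal usage sketch of the counter protocol described in the JitCounter
# docstring, not part of the original module.  It uses the deterministic
# variant (which runs on the host Python, untranslated) so the hash -> index
# mapping is predictable.  With threshold=3, the third tick() pushes the
# stored time value past 1.0, returns True, and resets the counter.
counter = DeterministicJitCounter()
increment = counter.compute_threshold(3)   # roughly 1.0 / 3 per tick
h = counter.fetch_next_hash()
print(counter.tick(h, increment))   # False
print(counter.tick(h, increment))   # False
print(counter.tick(h, increment))   # True (threshold reached, value reset)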
danielballan/scikit-xray
refs/heads/master
skbeam/core/spectroscopy.py
8
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven        #
# National Laboratory. All rights reserved.                            #
#                                                                      #
# Redistribution and use in source and binary forms, with or without   #
# modification, are permitted provided that the following conditions   #
# are met:                                                             #
#                                                                      #
# * Redistributions of source code must retain the above copyright     #
#   notice, this list of conditions and the following disclaimer.      #
#                                                                      #
# * Redistributions in binary form must reproduce the above copyright  #
#   notice, this list of conditions and the following disclaimer in    #
#   the documentation and/or other materials provided with the         #
#   distribution.                                                      #
#                                                                      #
# * Neither the name of the Brookhaven Science Associates, Brookhaven  #
#   National Laboratory nor the names of its contributors may be used  #
#   to endorse or promote products derived from this software without  #
#   specific prior written permission.                                 #
#                                                                      #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS  #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT    #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS    #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE       #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,           #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES   #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR   #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)   #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,  #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)        #
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED  #
# OF THE POSSIBILITY OF SUCH DAMAGE.                                   #
########################################################################

"""
    This module is for spectroscopy-specific tools (spectrum fitting etc.).
"""
from __future__ import absolute_import, division, print_function

import numpy as np
from six.moves import zip
from scipy.integrate import simps
from .fitting import fit_quad_to_peak

import logging
logger = logging.getLogger(__name__)


def align_and_scale(energy_list, counts_list, pk_find_fun=None):
    """
    Parameters
    ----------
    energy_list : iterable of ndarrays
        list of ndarrays with the energy of each element

    counts_list : iterable of ndarrays
        list of ndarrays of counts/element

    pk_find_fun : function or None
        A function which takes two ndarrays and returns parameters
        about the largest peak.  If None, defaults to `find_largest_peak`.
        For this demo, the output is (center, height, width), but this
        should be pinned down better.

    Returns
    -------
    out_e : list of ndarray
        The aligned/scaled energy arrays

    out_c : list of ndarray
        The count arrays (should be the same as the input)
    """
    if pk_find_fun is None:
        pk_find_fun = find_largest_peak

    base_sigma = None
    out_e, out_c = [], []
    for e, c in zip(energy_list, counts_list):
        E0, max_val, sigma = pk_find_fun(e, c)
        if base_sigma is None:
            base_sigma = sigma
        out_e.append((e - E0) * base_sigma / sigma)
        out_c.append(c)

    return out_e, out_c


def find_largest_peak(x, y, window=None):
    """
    Finds and estimates the location, width, and height of
    the largest peak. Assumes the top of the peak can be
    approximated as a Gaussian.  Finds the peak properties
    using least-squares fitting of a parabola to the log of
    the counts.

    The region around the peak can be approximated by
    :math:`Y = Y0 * exp(- (X - X0)**2 / (2 * sigma **2))`

    Parameters
    ----------
    x : ndarray
       The independent variable

    y : ndarray
       Dependent variable sampled at positions x

    window : int, optional
       The size of the window around the maximum to use
       for the fitting


    Returns
    -------
    x0 : float
        The location of the peak

    y0 : float
        The magnitude of the peak

    sigma : float
        Width of the peak
    """

    # make sure they are _really_ arrays
    x = np.asarray(x)
    y = np.asarray(y)

    # get the bin with the largest number of counts
    j = np.argmax(y)
    if window is not None:
        # clamp the lower bound of the window to 0; the previous
        # np.max(j - window, 0) treated the 0 as an axis argument
        # rather than a candidate value
        roi = slice(np.max([j - window, 0]),
                    j + window + 1)
    else:
        roi = slice(0, -1)

    (w, x0, y0), r2 = fit_quad_to_peak(x[roi],
                                       np.log(y[roi]))

    return x0, np.exp(y0), 1/np.sqrt(-2*w)


def integrate_ROI_spectrum(bin_edges, counts, x_min, x_max):
    """Integrate region(s) of histogram.

    If `x_min` and `x_max` are arrays/lists they must be equal in
    length. The values contained in `bin_edges` must be monotonic
    (up or down).  The returned value is the sum of all the regions
    and a single scalar value is returned.  Each region is computed
    independently, if regions overlap the overlapped area will be
    included multiple times in the final sum.

    `bin_edges` is an array of the left edges and the final right
    edge of the bins.  `counts` is the value in each of those bins.

    The bins whose centers fall within the integration limits are
    included in the sum.

    Parameters
    ----------
    bin_edges : array
        Independent variable, any unit.

        Must be one longer in length than counts

    counts : array
        Dependent variable, any units

    x_min : float or array
        The lower edge of the integration region(s).

    x_max : float or array
        The upper edge of the integration region(s).

    Returns
    -------
    float
        The total integrated value in the same units as `counts`
    """
    bin_edges = np.asarray(bin_edges)
    # bin centers are the left edges plus half of the bin widths;
    # adding the full np.diff() would land on the right edges instead
    return integrate_ROI(bin_edges[:-1] + np.diff(bin_edges) / 2,
                         counts, x_min, x_max)


def _formatter_array_regions(x, centers, window=1, tab_count=0):
    """Returns a formatted string of sub-sections of an array

    Each value in center generates a section of the string like:

       {tab_count*\t}c : [x[c - n] ... x[c] ... x[c + n + 1]]

    Parameters
    ----------
    x : array
        The array to be looked into

    centers : iterable
        The locations to print out around

    window : int, optional
        how many values on either side of center to include

        defaults to 1

    tab_count : int, optional
       The number of tabs to pre-fix lines with

       default is 0

    Returns
    -------
    str
      The formatted string
    """
    xl = len(x)
    x = np.asarray(x)
    header = ("\t"*tab_count + 'center\tarray values\n' +
              "\t"*tab_count + '------\t------------\n')
    return header + '\n'.join(
        ["\t"*tab_count + "{c}: \t {vals}".format(
            c=c,
            vals=x[np.max([0, c-window]):np.min([xl, c + window + 1])])
         for c in centers])


def integrate_ROI(x, y, x_min, x_max):
    """Integrate region(s) of input data.

    If `x_min` and `x_max` are arrays/lists they must be equal in
    length. The values contained in `x` must be monotonic (up or
    down).  The returned value is the sum of all the regions and
    a single scalar value is returned.  Each region is computed
    independently, if regions overlap the overlapped area will be
    included multiple times in the final sum.

    This function assumes that `y` is a function of
    `x` sampled at `x`.

    Parameters
    ----------
    x : array
      Independent variable, any unit

    y : array
      Dependent variable, any units

    x_min : float or array
      The lower edge of the integration region(s) in units of x.

    x_max : float or array
      The upper edge of the integration region(s) in units of x.

    Returns
    -------
    float
      The total integrated value in the same units as `y`
    """
    # make sure x (x-values) and y (y-values) are arrays
    x = np.asarray(x)
    y = np.asarray(y)

    if x.shape != y.shape:
        raise ValueError("Inputs (x and y) must be the same "
                         "size. x.shape = {0} and y.shape = "
                         "{1}".format(x.shape, y.shape))

    # use np.sign() on the differences of x to detect sign changes;
    # the monotonicity checks below are run on this sign array.
    eval_x_arr_sign = np.sign(np.diff(x))

    # check that no values violate the monotonicity requirement; if
    # exceptions exist, the error points to the locations within the
    # source array where they occur.
    if not np.all(eval_x_arr_sign == eval_x_arr_sign[0]):
        error_locations = np.where(eval_x_arr_sign != eval_x_arr_sign[0])[0]
        raise ValueError("Independent variable must be monotonic "
                         "(increasing or decreasing). Erroneous values "
                         "found at x-value array index locations:\n" +
                         _formatter_array_regions(x, error_locations))

    # if the differences are all negative, x is monotonically decreasing;
    # reverse both x and y so that they monotonically increase in value
    # before integrating
    if eval_x_arr_sign[0] == -1:
        x = x[::-1]
        y = y[::-1]
        logger.debug("Input values for 'x' were found to be "
                     "monotonically decreasing. The 'x' and "
                     "'y' arrays have been reversed prior to "
                     "integration.")

    # up-cast to 1d and make sure it is flat
    x_min = np.atleast_1d(x_min).ravel()
    x_max = np.atleast_1d(x_max).ravel()

    # verify that the number of minimum and maximum boundary values are equal
    if len(x_min) != len(x_max):
        raise ValueError("integration bounds must have same lengths")

    # verify that each specified minimum value is actually less than its
    # sister maximum value, and raise an error otherwise
    if np.any(x_min >= x_max):
        raise ValueError("All lower integration bounds must be less than "
                         "upper integration bounds.")

    # check to make sure that all specified minimum and maximum values are
    # actually contained within the extents of the independent variable array
    if np.any(x_min < x[0]):
        error_locations = np.where(x_min < x[0])[0]
        raise ValueError("Specified lower integration boundary values are "
                         "outside the spectrum range. All minimum integration "
                         "boundaries must be greater than, or equal to the "
                         "lowest value in spectrum range. The erroneous x_min "
                         "array indices are:\n" +
                         _formatter_array_regions(x_min,
                                                  error_locations, window=0))

    if np.any(x_max > x[-1]):
        error_locations = np.where(x_max > x[-1])[0]
        raise ValueError("Specified upper integration boundary values "
                         "are outside the spectrum range. All maximum "
                         "integration boundary values must be less "
                         "than, or equal to the highest value in the spectrum "
                         "range. The erroneous x_max array indices are: "
                         "\n" +
                         _formatter_array_regions(x_max,
                                                  error_locations, window=0))

    # find the bottom index of each integration bound
    bottom_indx = x.searchsorted(x_min)
    # find the top index of each integration bound
    # NOTE: +1 required for correct slicing for integration function
    top_indx = x.searchsorted(x_max) + 1

    # set up temporary variables
    accum = 0
    # integrate each region
    for bot, top in zip(bottom_indx, top_indx):
        # Note: If an odd number of intervals is specified, then the
        #   even='avg' setting calculates and averages first AND last
        #   N-2 intervals using the trapezoidal rule.
        # If calculation speed becomes an issue, then consider changing
        #   the setting to 'first' or 'last', in which case the
        #   trapezoidal rule is only applied to either the first or
        #   last N-2 intervals.
        accum += simps(y[bot:top], x[bot:top], even='avg')

    return accum
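# --- Illustrative sketch (not part of the original file) ---
# A hedged usage example for integrate_ROI above; the import path is an
# assumption derived from this file's location (skbeam/core/spectroscopy.py)
# and may differ depending on how the package is installed.

import numpy as np
from skbeam.core.spectroscopy import integrate_ROI

x = np.linspace(0, 10, 1001)
y = np.exp(-(x - 5.0) ** 2 / 0.5)            # synthetic Gaussian peak

# integrate the core of the peak
area = integrate_ROI(x, y, x_min=4.0, x_max=6.0)
print(area)

# several (possibly overlapping) regions are summed together
total = integrate_ROI(x, y, x_min=[1.0, 4.0], x_max=[2.0, 6.0])
print(total)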
mstreatfield/anim-studio-tools
refs/heads/master
review_tool/sources/reviewTool/api/entity.py
5
##
#   \namespace  reviewTool.api.entity
#
#   \remarks    [desc::commented]
#
#   \author     Dr. D Studios
#   \date       08/02/11
#

import socket

from ..database import db
from ..database.threads import EntityVersionsThread

from .version import Version
from .clip import Clip

class Entity(object):
    def __init__( self, context, entityType, name, shotgunId, status = None ):
        """
        Constructor for the Entity class

        :param context: the associated context that loaded this entity
        :type <Context> || None:
        :param entityType: container type for this entity
        :type <str>:
        :param name:
        :type <str>:
        :param shotgunId:
        :type <int>:
        """
        self._context = context        # <Context> || None  the context instance this entity is linked to
        self._entityType = entityType  # <str>  defines the tank container type for this entity
        self._shotgunId = shotgunId    # <int>  id used to lookup shotgun reference
        self._name = name              # <str>  name of this entity
        self._status = status

        self._sortOrder = -1           # <int>  used to generate sorting keys for clips
        self._cutStart = None          # <int>  cut information - defaulted to None to trigger lookup as needed
        self._cutEnd = None            # <int>  cut information - defaulted to None to trigger lookup as needed
        self._handleStart = None       # <int>
        self._handleEnd = None         # <int>

        self._cache = {}               # <dict> { <str> key: <variant> value, .. }
        self._versions = {}            # <dict> { <str> dept: <list> versions [ <Version>, .. ], .. }

    def cache( self, key, value ):
        """
        Caches the given value on this instance under the given key

        :param key:
        :type <str>:
        :param value:
        :type <variant>:
        """
        self._cache[str(key)] = value

    def cachedValue( self, key, default = None ):
        """
        Returns the cached value from this instance at the given key

        :param key:
        :type <str>:
        :param default:
        :type <variant>:
        """
        return self._cache.get(str(key),default)

    def clearCache( self ):
        """
        Clears out the current cache and resets the data for lookup again
        """
        self._cache.clear()
        self._versions.clear()

        self._cutStart = None
        self._cutEnd = None
        self._handleStart = None
        self._handleEnd = None

    def collectVersions( self ):
        """
        Looks up the version information from Shotgun
        """
        if ( self._versions ):
            return

        thread = EntityVersionsThread(db.project(),Version.ShotgunFields,self.tankKey())
        thread.run()
        self.setVersionsFromShotgun(thread.versions())

    def collectCutData( self, attempts = 5 ):
        """
        Collects the cut information for this entity instance from
        shotgun.  This method is called internally as needed when the
        cache needs to be reloaded.  To trigger this method again, you
        should clear the cache using clearCache.

        :param attempts:
        :type <int>: maximum number of attempts to collect data
        """
        # check to see if the cut data is already loaded
        if ( self._cutEnd != None ):
            return

        # create the shotgun query information
        fields = [ 'sg_cut_start', 'sg_cut_end', 'sg_handle_start', 'sg_handle_end' ]
        filters = [[ 'shot', 'is', {'type':'Shot','id': self.shotgunId()} ]]
        order = [{'field_name':'id','direction':'desc'}]

        # lookup the cut data from shotgun
        sg_cut = None
        depth = 0
        while ( not sg_cut and depth < attempts ):
            # wrap the lookup within a try/except so transient socket
            # errors can be retried
            try:
                sg_cut = db.session().find_one( 'Cut_sg_shots_Connection', filters, fields, order )
            except socket.error:
                pass

            depth += 1

        # try to lookup one last time, not blocking the error this time
        if ( not sg_cut ):
            sg_cut = db.session().find_one( 'Cut_sg_shots_Connection', filters, fields, order )

        # set the cut data based on the result
        if ( not sg_cut ):
            sg_cut = {}

        self._cutStart = sg_cut.get('sg_cut_start',0)
        self._cutEnd = sg_cut.get('sg_cut_end',0)
        self._handleStart = sg_cut.get('sg_handle_start',0)
        self._handleEnd = sg_cut.get('sg_handle_end',0)

    def context( self ):
        """
        Returns the context instance that this entity is associated with

        :return <Context>:
        """
        return self._context

    def cutEnd( self ):
        """
        Returns the cut information that is loaded for this entity.  This
        method will automatically call the collectCutData method to
        lookup the data from shotgun if the information is not already
        cached.

        :return <int>:
        """
        # cache the data
        self.collectCutData()

        # return the cut info
        return self._cutEnd

    def cutStart( self ):
        """
        Returns the cut information that is loaded for this entity.  This
        method will automatically call the collectCutData method to
        lookup the data from shotgun if the information is not already
        cached.

        :return <int>:
        """
        self.collectCutData()
        return self._cutStart

    def entityType( self ):
        """
        Returns the tank entity/container type that this entity represents

        :return <str>:
        """
        return self._entityType

    def findVersions( self, department ):
        """
        Looks up versions from this entity's cached version information
        that are associated with the given department type.

        :param department:
        :type <str>:

        :return <list> [ <Version>, .. ]:
        """
        return self._versions.get(str(department),[])

    def handleEnd( self ):
        """
        Returns the handle information that is loaded for this entity.
        This method will automatically call the collectCutData method to
        lookup the data from shotgun if the information is not already
        cached.

        :return <int>:
        """
        # cache the data
        self.collectCutData()

        # return the handle info
        return self._handleEnd

    def handleStart( self ):
        """
        Returns the handle information that is loaded for this entity.
        This method will automatically call the collectCutData method to
        lookup the data from shotgun if the information is not already
        cached.

        :return <int>:
        """
        self.collectCutData()
        return self._handleStart

    def name( self ):
        """
        Returns the name for this entity instance

        :return <str>:
        """
        return self._name

    def setContext( self, context ):
        self._context = context

    def setSortOrder( self, order ):
        """
        Sets the sorting order for this entity in relation to its context

        :param order:
        :type <int>:
        """
        self._sortOrder = order

    def setCutEnd( self, frame ):
        """
        Sets the cut end information for this entity

        :param frame:
        :type <int>:
        """
        self._cutEnd = frame

    def setCutStart( self, frame ):
        """
        Sets the cut start information for this entity

        :param frame:
        :type <int>:
        """
        self._cutStart = frame

    def setVersionsFromShotgun( self, sg_versions ):
        """
        Initializes the version data for this entity to the given shotgun
        versions.  This will clear out any existing version data, and
        organize the given versions based on their department, creating
        new Version instances for each shotgun version.

        :param sg_versions:
        :type <list> [ <dict> { <str> key: <variant> value, .. }, .. ]
        """
        # create a list of Version instances from the given shotgun versions
        versions = [Version(self,sg_version) for sg_version in sg_versions]
        versions.sort( Version.compare )

        # clear out the current version cache
        self._versions = {}

        # organize the new versions into dictionaries based on their department
        for version in versions:
            dept = version.department()
            if ( not dept in self._versions ):
                self._versions[dept] = [version]
            else:
                self._versions[dept].append(version)

    def sortOrder( self ):
        """
        Returns the sort order for this entity based on its relation to
        other entities for the given context its in.

        :return <int>:
        """
        return self._sortOrder

    def sortVersions( self ):
        """
        Goes through and resorts all the versions based on the current
        sorting criteria
        """
        for versions in self._versions.values():
            versions.sort(Version.compare)

    def shotgunId( self ):
        """
        Returns the unique shotgun id pointing to the shotgun record for
        this entity

        :return <int>:
        """
        return self._shotgunId

    def siblings( self, padLeft = 1, padRight = 1 ):
        """
        Returns the Entities that are on the left or right of this entity
        based on the given padding

        :param padLeft:
        :type <int>:
        :param padRight:
        :type <int>:

        :return <tuple> ( <list> [ <Entity>, .. ] previous, <list> [ <Entity>, .. ] following )
        """
        # return the siblings for this entity within its own context
        if ( self.context() ):
            return self.context().siblings( self, padLeft, padRight )

        # return the siblings from shotgun based on shot cut order
        if ( self.entityType() == 'Shot' ):
            prev, next = db.findShotSiblings( self.name(), padLeft = padLeft, padRight = padRight )

            # create entities from the shotgun information
            prevEntities = [ Entity(None,'Shot',s['code'],s['id'],s['sg_status_list']) for s in prev ]
            nextEntities = [ Entity(None,'Shot',s['code'],s['id'],s['sg_status_list']) for s in next ]

            return (prevEntities,nextEntities)

        print "don't know how to look up without context or shot"
        return []

    def status( self ):
        return self._status

    def versions( self ):
        """
        Returns all of the versions that are linked to this entity,
        broken down by the department they're in.

        :return <dict> { <str> dept: <list> [ <Version>, .. ], .. }:
        """
        return self._versions

    def uncache( self, key ):
        """
        Removes the given key from the current cache on this entity.  If
        the key is found, then the cached value is popped off the cache
        and returned.

        :param key:
        :type <str>:

        :return <variant>:
        """
        key = str(key)
        if ( key in self._cache ):
            return self._cache.pop(key)
        return None

    def tankKey( self ):
        """
        Returns the tank key for this entity by combining the
        entity/container type with the name of the entity.

        :return <str>:
        """
        return '%s(%s)' % (self._entityType,self._name)

    def toXml( self, xparent ):
        """
        Records this element to the given parent's xml element

        :param xparent:
        :type <XmlElement>:
        """
        xentity = xparent.createChild('entity')

        # set the attributes
        xentity.setAttribute( 'type', self._entityType )
        xentity.setAttribute( 'id', self._shotgunId )
        xentity.setAttribute( 'name', self._name )
        xentity.setAttribute( 'status', self._status )

        # set the properties
        xprops = xentity.createChild('props')
        xprops.setProperty( 'sortOrder', self._sortOrder )
        xprops.setProperty( 'cutStart', self._cutStart )
        xprops.setProperty( 'cutEnd', self._cutEnd )
        xprops.setProperty( 'handleStart', self._handleStart )
        xprops.setProperty( 'handleEnd', self._handleEnd )

        return xentity

    @staticmethod
    def fromXml( xentity, context = None ):
        """
        Restores this entity from the given xml

        :param xentity:
        :type <XmlElement>:
        :param context:
        :type <Context> || None:

        :return <Entity> || None:
        """
        if ( not xentity ):
            return None

        entityType = xentity.attribute('type','')
        name = xentity.attribute('name','')
        # toXml() stores the shotgun id under the 'id' attribute, so it
        # must be read back from 'id' for the roundtrip to work
        shotgunId = int(xentity.attribute('id',0))
        status = xentity.attribute('status','')

        output = Entity( context, entityType, name, shotgunId, status )

        xprops = xentity.findChild('props')
        output._sortOrder = int(xprops.property('sortOrder',-1))
        output._cutStart = int(xprops.property('cutStart',0))
        output._cutEnd = int(xprops.property('cutEnd',0))
        output._handleStart = int(xprops.property('handleStart',0))
        output._handleEnd = int(xprops.property('handleEnd',0))

        return output

    @staticmethod
    def quickLaunch( clipdata, playlist = '', defaultDepartment = '', padLeft = 0, padRight = 0, overrideAudio = False, compareMethod = None, mode = None ):
        """
        Launches RV as a quick lookup with the given options

        :param clipdata: space separated list of shot/dept/version codes
        :type <list> [ <str>, .. ]:
        :param playlist:
        :type <str>:
        :param defaultDepartment:
        :type <str> || None:
        :param padLeft:
        :type <int>:
        :param padRight:
        :type <int>:
        :param compareMethod:
        :type <str> || None:
        :param overrideAudio:
        :type <bool>:
        :param mode:
        :type <str> || None:

        :return <bool> success:
        """
        # make sure we have something to load
        if ( not (clipdata or playlist) ):
            print 'you need to provide either clip data or a playlist to load.'
            return None

        clips = []

        # load from clip ids
        if ( clipdata ):
            # load the clips from data
            clips = Clip.fromData( clipdata, defaultDepartment, padLeft, padRight, overrideAudio )

        # load from playlist
        elif ( playlist ):
            from .contexts.playlist import PlaylistContext

            context = PlaylistContext.fromFile( playlist )
            if ( not context ):
                print 'Could not load playlist context from', playlist
                return None

            versions = context.versions()
            for version in versions:
                print 'loading tank data', version.displayName()
                version.collectSourceData()
                clips.append(Clip(version))

        print 'playing clips...'
        for clip in clips:
            if ( mode == 'video' ):
                print clip.videoSource()
            else:
                print clip.imageSource()

        return Clip.playClips(clips,compareMethod=compareMethod,mode=mode)

# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
# # anim-studio-tools is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # anim-studio-tools is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
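# --- Illustrative sketch (not part of the original file) ---
# The collectCutData() method above guards a flaky network lookup with a
# bounded retry loop plus one final unguarded attempt.  A generic sketch
# of that pattern, with a hypothetical fetch() standing in for the
# shotgun query:

import socket

def fetch_with_retries(fetch, attempts=5):
    result = None
    depth = 0
    while not result and depth < attempts:
        try:
            result = fetch()
        except socket.error:
            pass              # swallow and retry, as collectCutData does
        depth += 1
    if not result:
        result = fetch()      # last try: let any error propagate
    return result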
ifearcompilererrors/fle_redesign
refs/heads/master
fle_redesign/apps/main/views.py
1
import pygeoip
import json
import re

from django.shortcuts import render_to_response

from fle_redesign import settings

# the GeoIP database must be loaded at import time; map() below relies
# on `gic`, so leaving this line commented out raises a NameError
gic = pygeoip.GeoIP(settings.GEOIPDAT)

def home(request):
    return render_to_response("home.html")

def map(request):

    ips = open(settings.PROJECT_PATH + "/ips.txt").readlines()

    records = [gic.record_by_addr(item.strip()) for item in ips if item]

    locations = []
    # seed the seen-set with (0, 0) so null coordinates are skipped
    existing_locations = set([(0, 0)])

    for record in records:
        if record:
            if (record['latitude'], record['longitude']) not in existing_locations:
                name = [record['city'], record['region_name'], record['country_name']]
                name = filter(lambda x: not re.match("^\d*$", x), name)
                name = ", ".join(name)
                locations.append({
                    "latitude": record['latitude'],
                    "longitude": record['longitude'],
                    "name": name,
                })
                existing_locations.add((record['latitude'], record['longitude']))

    return render_to_response('map.html', {"locations": json.dumps(locations)})
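# --- Illustrative sketch (not part of the original file) ---
# The map() view above de-duplicates points with a seen-set keyed on
# (latitude, longitude), seeded with (0, 0) so null coordinates are
# dropped.  The same pattern in isolation:

records = [{'latitude': 1.0, 'longitude': 2.0},
           {'latitude': 1.0, 'longitude': 2.0},   # duplicate
           {'latitude': 0.0, 'longitude': 0.0}]   # null coordinate
seen = set([(0.0, 0.0)])
unique = []
for r in records:
    key = (r['latitude'], r['longitude'])
    if key not in seen:
        seen.add(key)
        unique.append(r)
assert len(unique) == 1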
timohtey/mediadrop_copy
refs/heads/master
mediadrop/lib/app_globals.py
1
# This file is a part of MediaDrop (http://www.mediadrop.net), # Copyright 2009-2014 MediaDrop contributors # For the exact contribution history, see the git revision log. # The source code contained in this file is licensed under the GPLv3 or # (at your option) any later version. # See LICENSE.txt in the main project directory, for more information. """The application's Globals object""" from beaker.cache import CacheManager from beaker.util import parse_cache_config_options class Globals(object): """Globals acts as a container for objects available throughout the life of the application """ def __init__(self, config): """One instance of Globals is created during application initialization and is available during requests via the 'app_globals' variable """ self.cache = cache = CacheManager(**parse_cache_config_options(config)) self.settings_cache = cache.get_cache('app_settings', expire=3600, type='memory') # We'll store the primary translator here for sharing between requests self.primary_language = None self.primary_translator = None @property def settings(self): def fetch_settings(): from mediadrop.model import DBSession, Setting settings_dict = dict(DBSession.query(Setting.key, Setting.value)) return settings_dict return self.settings_cache.get(createfunc=fetch_settings, key=None)
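# --- Illustrative sketch (not part of the original file) ---
# A hedged example of the Beaker caching pattern used by Globals above:
# get() with a createfunc only invokes the function on a cache miss.
# The config dict here is a minimal assumption, not MediaDrop's real
# configuration, and the cache key is hypothetical.

from beaker.cache import CacheManager
from beaker.util import parse_cache_config_options

config = {'cache.type': 'memory'}
cache = CacheManager(**parse_cache_config_options(config))
settings_cache = cache.get_cache('app_settings', expire=3600, type='memory')

def fetch_settings():
    print('cache miss: would hit the database here')
    return {'site_name': 'demo'}

settings = settings_cache.get(key='settings', createfunc=fetch_settings)
settings = settings_cache.get(key='settings', createfunc=fetch_settings)  # served from cache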
ZENGXH/scikit-learn
refs/heads/master
sklearn/linear_model/perceptron.py
245
# Author: Mathieu Blondel # License: BSD 3 clause from .stochastic_gradient import BaseSGDClassifier from ..feature_selection.from_model import _LearntSelectorMixin class Perceptron(BaseSGDClassifier, _LearntSelectorMixin): """Perceptron Read more in the :ref:`User Guide <perceptron>`. Parameters ---------- penalty : None, 'l2' or 'l1' or 'elasticnet' The penalty (aka regularization term) to be used. Defaults to None. alpha : float Constant that multiplies the regularization term if regularization is used. Defaults to 0.0001 fit_intercept : bool Whether the intercept should be estimated or not. If False, the data is assumed to be already centered. Defaults to True. n_iter : int, optional The number of passes over the training data (aka epochs). Defaults to 5. shuffle : bool, optional, default True Whether or not the training data should be shuffled after each epoch. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. verbose : integer, optional The verbosity level n_jobs : integer, optional The number of CPUs to use to do the OVA (One Versus All, for multi-class problems) computation. -1 means 'all CPUs'. Defaults to 1. eta0 : double Constant by which the updates are multiplied. Defaults to 1. class_weight : dict, {class_label: weight} or "balanced" or None, optional Preset for the class_weight fit parameter. Weights associated with classes. If not given, all classes are supposed to have weight one. The "balanced" mode uses the values of y to automatically adjust weights inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))`` warm_start : bool, optional When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution. Attributes ---------- coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\ n_features] Weights assigned to the features. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function. Notes ----- `Perceptron` and `SGDClassifier` share the same underlying implementation. In fact, `Perceptron()` is equivalent to `SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)`. See also -------- SGDClassifier References ---------- http://en.wikipedia.org/wiki/Perceptron and references therein. """ def __init__(self, penalty=None, alpha=0.0001, fit_intercept=True, n_iter=5, shuffle=True, verbose=0, eta0=1.0, n_jobs=1, random_state=0, class_weight=None, warm_start=False): super(Perceptron, self).__init__(loss="perceptron", penalty=penalty, alpha=alpha, l1_ratio=0, fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle, verbose=verbose, random_state=random_state, learning_rate="constant", eta0=eta0, power_t=0.5, warm_start=warm_start, class_weight=class_weight, n_jobs=n_jobs)
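# --- Illustrative sketch (not part of the original file) ---
# Minimal usage of the Perceptron class documented above, on a toy
# linearly separable problem; parameter names follow this version's
# signature (n_iter rather than the later max_iter).

import numpy as np
from sklearn.linear_model import Perceptron

X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
y = np.array([0, 0, 1, 1])

clf = Perceptron(n_iter=5, eta0=1.0, random_state=0)
clf.fit(X, y)
print(clf.predict([[2.5, 2.5]]))    # expected to land in class 1
print(clf.coef_, clf.intercept_)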
GreenBlast/Linger
refs/heads/master
LingerAdapters/GMailAdapter.py
1
import LingerAdapters.LingerBaseAdapter as lingerAdapters

# Operation specific imports
import imaplib
import smtplib
import email

try:
    # Python 2 locations of the MIME helpers
    from email.MIMEMultipart import MIMEMultipart
    from email.MIMEBase import MIMEBase
    from email.MIMEText import MIMEText
    from email.Encoders import encode_base64
except ImportError:
    # Python 3
    from email.mime.multipart import MIMEMultipart
    from email.mime.base import MIMEBase
    from email.mime.text import MIMEText
    from email.encoders import encode_base64

import base64
import re
import uuid
import os

from future.utils import itervalues

import LingerConstants


class GMailAdapter(lingerAdapters.LingerBaseAdapter):
    """GMailAdapter enables sending and monitoring mails"""
    def __init__(self, configuration):
        super(GMailAdapter, self).__init__(configuration)
        self.logger.debug("GMailAdapter started")

        self.scheduled_job = None
        self.subscribers_dict = {}
        self.find_uid_reg = re.compile(r"""\(UID (?P<UID>\d+)\)""")

        # fields
        self._gmail_user = self.configuration["gmail_user"]
        self._gmail_password = self.configuration["gmail_password"]
        self.interval = float(self.configuration["intervalSec"])
        self.imap_server = None
        self.last_uid = None

        # optional_fields
        self.recipient_email = self.configuration.get("recipient_email", self._gmail_user)

        self.logger.info("GMailAdapter configured with user=%s" % (self._gmail_user,))

    def connect_to_imap_server(self):
        self.imap_server = imaplib.IMAP4_SSL("imap.gmail.com", 993)
        self.imap_server.login(self._gmail_user, self._gmail_password)

    def disconnect_from_imap_server(self):
        self.imap_server.close()
        self.imap_server.logout()

    def subscribe_for_new_mails(self, callback):
        subscription_uuid = uuid.uuid4()

        # If this is the first subscriber, start monitoring the inbox
        if self.subscribers_dict == {}:
            self.start_monitor_mails()

        self.subscribers_dict[subscription_uuid] = callback
        return subscription_uuid

    def unsubscribe(self, subscription_uuid):
        del self.subscribers_dict[subscription_uuid]

        # If no subscribers remain, stop monitoring the inbox
        if self.subscribers_dict == {}:
            self.stop_monitor_mails()

    def start_monitor_mails(self):
        self.connect_to_imap_server()
        self.last_uid = self.get_last_uid()
        self.scheduled_job = self.scheduler.add_job(self.monitor_new_mails,
                                                    'interval',
                                                    seconds=self.interval)

    def stop_monitor_mails(self):
        if self.scheduled_job is not None:
            self.scheduled_job.remove()
        self.imap_server.close()

    def get_last_uid(self):
        # Getting last uid
        _, last_message = self.imap_server.select('INBOX')
        _, last_message_uid_info = self.imap_server.fetch(last_message[0], '(UID)')
        last_uid = self.find_uid_reg.findall(last_message_uid_info[0])[0]
        return last_uid

    def monitor_new_mails(self):
        update_uid_flag = False
        try:
            _, last_message = self.imap_server.select('INBOX')
        except Exception:
            # the IMAP session may have timed out; reconnect and retry
            self.connect_to_imap_server()
            _, last_message = self.imap_server.select('INBOX')

        result, data = self.imap_server.uid('search', None, 'UID', self.last_uid + ':*')
        messages = data[0].split()

        int_last_uid = int(self.last_uid)
        mail_list = []

        for message_uid in messages:
            # SEARCH command *always* returns at least the most
            # recent message, even if it has already been synced
            if int(message_uid) > int_last_uid:
                update_uid_flag = True
                result, data = self.imap_server.uid('fetch', message_uid, '(RFC822)')
                mail_list.append(data[0][1])

        if update_uid_flag:
            self.last_uid = self.get_last_uid()
            # TODO should remove this?
            # update_uid_flag = False

        for mail in mail_list:
            for subscriber_callback in itervalues(self.subscribers_dict):
                subscriber_callback(email.message_from_string(mail))

    def send_mail(self, to, subject, text, **kwargs):
        message = MIMEMultipart()
        message['From'] = self._gmail_user
        message['To'] = to
        message['Subject'] = subject

        message.attach(MIMEText(text))

        if LingerConstants.FILE_PATH_SRC in kwargs:
            image_part = MIMEBase('application', 'octet-stream')
            image_part.set_payload(open(kwargs[LingerConstants.FILE_PATH_SRC], 'rb').read())
            encode_base64(image_part)
            image_part.add_header('Content-Disposition',
                                  'attachment; filename="%s"' %
                                  os.path.basename(kwargs[LingerConstants.FILE_PATH_SRC]))
            message.attach(image_part)
        elif LingerConstants.IMAGE_DATA in kwargs:
            image_part = MIMEBase('application', 'octet-stream')
            image_part.set_payload(kwargs[LingerConstants.IMAGE_DATA])
            encode_base64(image_part)
            image_part.add_header('Content-Disposition', 'attachment; filename="image.jpg"')
            message.attach(image_part)
        elif LingerConstants.IMAGE_BASE64_DATA in kwargs:
            image_part = MIMEBase('application', 'octet-stream')
            image_part.set_payload(base64.b64decode(kwargs[LingerConstants.IMAGE_BASE64_DATA]))
            encode_base64(image_part)
            image_part.add_header('Content-Disposition', 'attachment; filename="image.jpg"')
            message.attach(image_part)

        mail_server = smtplib.SMTP("smtp.gmail.com", 587)
        mail_server.ehlo()
        mail_server.starttls()
        mail_server.ehlo()
        mail_server.login(self._gmail_user, self._gmail_password)
        mail_server.sendmail(self._gmail_user, to, message.as_string())
        mail_server.quit()

    def send_message(self, subject, text, **kwargs):
        self.send_mail(self.recipient_email, subject, text, **kwargs)


class GMailAdapterFactory(lingerAdapters.LingerBaseAdapterFactory):
    """GMailAdapterFactory generates GMailAdapter instances"""
    def __init__(self):
        super(GMailAdapterFactory, self).__init__()
        self.item = GMailAdapter

    @staticmethod
    def get_instance_name():
        return "GMailAdapter"

    def get_fields(self):
        fields, optional_fields = super(GMailAdapterFactory, self).get_fields()
        fields += [('gmail_user', "string"),
                   ('gmail_password', "string"),
                   ("intervalSec", "float")]
        # extend (rather than overwrite) the optional fields inherited
        # from the base factory, mirroring how `fields` is handled
        optional_fields += [("recipient_email", "string")]
        return fields, optional_fields
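# --- Illustrative sketch (not part of the original file) ---
# A hedged example of wiring up the adapter above.  The configuration
# keys come from GMailAdapter.__init__ and the factory's get_fields();
# everything else (the credentials, and the assumption that the base
# LingerBaseAdapter can be constructed from this dict and supplies
# self.logger / self.scheduler) is a placeholder, not shown here.

configuration = {
    'gmail_user': 'someone@gmail.com',        # placeholder credentials
    'gmail_password': 'app-password',         # placeholder
    'intervalSec': '30',
    'recipient_email': 'alerts@example.com',  # optional field
}

adapter = GMailAdapter(configuration)

def on_new_mail(message):
    print('new mail: %s' % message.get('Subject'))

subscription = adapter.subscribe_for_new_mails(on_new_mail)
adapter.send_message('test subject', 'test body')
adapter.unsubscribe(subscription)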
hlin117/statsmodels
refs/heads/master
statsmodels/sandbox/distributions/tests/test_multivariate.py
31
# -*- coding: utf-8 -*- """ Created on Sat Apr 16 15:02:13 2011 @author: Josef Perktold """ import numpy as np from numpy.testing import assert_almost_equal, assert_array_almost_equal from statsmodels.sandbox.distributions.multivariate import ( mvstdtprob, mvstdnormcdf) from statsmodels.sandbox.distributions.mv_normal import MVT, MVNormal class Test_MVN_MVT_prob(object): #test for block integratal, cdf, of multivariate t and normal #comparison results from R def __init__(self): self.corr_equal = np.asarray([[1.0, 0.5, 0.5],[0.5,1,0.5],[0.5,0.5,1]]) self.a = -1 * np.ones(3) self.b = 3 * np.ones(3) self.df = 4 corr2 = self.corr_equal.copy() corr2[2,1] = -0.5 self.corr2 = corr2 def test_mvn_mvt_1(self): a, b = self.a, self.b df = self.df corr_equal = self.corr_equal #result from R, mvtnorm with option #algorithm = GenzBretz(maxpts = 100000, abseps = 0.000001, releps = 0) # or higher probmvt_R = 0.60414 #report, ed error approx. 7.5e-06 probmvn_R = 0.673970 #reported error approx. 6.4e-07 assert_almost_equal(probmvt_R, mvstdtprob(a, b, corr_equal, df), 4) assert_almost_equal(probmvn_R, mvstdnormcdf(a, b, corr_equal, abseps=1e-5), 4) mvn_high = mvstdnormcdf(a, b, corr_equal, abseps=1e-8, maxpts=10000000) assert_almost_equal(probmvn_R, mvn_high, 5) #this still barely fails sometimes at 6 why?? error is -7.2627419411830374e-007 #>>> 0.67396999999999996 - 0.67397072627419408 #-7.2627419411830374e-007 #>>> assert_almost_equal(0.67396999999999996, 0.67397072627419408, 6) #Fail def test_mvn_mvt_2(self): a, b = self.a, self.b df = self.df corr2 = self.corr2 probmvn_R = 0.6472497 #reported error approx. 7.7e-08 probmvt_R = 0.5881863 #highest reported error up to approx. 1.99e-06 assert_almost_equal(probmvt_R, mvstdtprob(a, b, corr2, df), 4) assert_almost_equal(probmvn_R, mvstdnormcdf(a, b, corr2, abseps=1e-5), 4) def test_mvn_mvt_3(self): a, b = self.a, self.b df = self.df corr2 = self.corr2 #from -inf #print 'from -inf' a2 = a.copy() a2[:] = -np.inf probmvn_R = 0.9961141 #using higher precision in R, error approx. 6.866163e-07 probmvt_R = 0.9522146 #using higher precision in R, error approx. 1.6e-07 assert_almost_equal(probmvt_R, mvstdtprob(a2, b, corr2, df), 4) assert_almost_equal(probmvn_R, mvstdnormcdf(a2, b, corr2, maxpts=100000, abseps=1e-5), 4) def test_mvn_mvt_4(self): a, bl = self.a, self.b df = self.df corr2 = self.corr2 #from 0 to inf #print '0 inf' a2 = a.copy() a2[:] = -np.inf probmvn_R = 0.1666667 #error approx. 6.1e-08 probmvt_R = 0.1666667 #error approx. 8.2e-08 assert_almost_equal(probmvt_R, mvstdtprob(np.zeros(3), -a2, corr2, df), 4) assert_almost_equal(probmvn_R, mvstdnormcdf(np.zeros(3), -a2, corr2, maxpts=100000, abseps=1e-5), 4) def test_mvn_mvt_5(self): a, bl = self.a, self.b df = self.df corr2 = self.corr2 #unequal integration bounds #print "ue" a3 = np.array([0.5, -0.5, 0.5]) probmvn_R = 0.06910487 #using higher precision in R, error approx. 3.5e-08 probmvt_R = 0.05797867 #using higher precision in R, error approx. 5.8e-08 assert_almost_equal(mvstdtprob(a3, a3+1, corr2, df), probmvt_R, 4) assert_almost_equal(probmvn_R, mvstdnormcdf(a3, a3+1, corr2, maxpts=100000, abseps=1e-5), 4) class TestMVDistributions(object): #this is not well organized def __init__(self): covx = np.array([[1.0, 0.5], [0.5, 1.0]]) mu3 = [-1, 0., 2.] cov3 = np.array([[ 1. , 0.5 , 0.75], [ 0.5 , 1.5 , 0.6 ], [ 0.75, 0.6 , 2. 
]]) self.mu3 = mu3 self.cov3 = cov3 mvn3 = MVNormal(mu3, cov3) mvn3c = MVNormal(np.array([0,0,0]), cov3) self.mvn3 = mvn3 self.mvn3c = mvn3c def test_mvn_pdf(self): cov3 = self.cov3 mvn3 = self.mvn3 mvn3c = self.mvn3c r_val = [-7.667977543898155, -6.917977543898155, -5.167977543898155] assert_array_almost_equal( mvn3.logpdf(cov3), r_val, decimal = 14) #decimal 18 r_val = [0.000467562492721686, 0.000989829804859273, 0.005696077243833402] assert_array_almost_equal( mvn3.pdf(cov3), r_val, decimal = 17) #cheating new mean, same cov, too dangerous, got wrong instance in tests #mvn3.mean = np.array([0,0,0]) mvn3b = MVNormal(np.array([0,0,0]), cov3) r_val = [0.02914269740502042, 0.02269635555984291, 0.01767593948287269] assert_array_almost_equal( mvn3b.pdf(cov3), r_val, decimal = 16) def test_mvt_pdf(self): cov3 = self.cov3 mu3 = self.mu3 mvt = MVT((0,0), 1, 5) assert_almost_equal(mvt.logpdf(np.array([0.,0.])), -1.837877066409345, decimal=15) assert_almost_equal(mvt.pdf(np.array([0.,0.])), 0.1591549430918953, decimal=15) mvt.logpdf(np.array([1.,1.]))-(-3.01552989458359) mvt1 = MVT((0,0), 1, 1) mvt1.logpdf(np.array([1.,1.]))-(-3.48579549941151) #decimal=16 rvs = mvt.rvs(100000) assert_almost_equal(np.cov(rvs, rowvar=0), mvt.cov, decimal=1) mvt31 = MVT(mu3, cov3, 1) assert_almost_equal(mvt31.pdf(cov3), [0.0007276818698165781, 0.0009980625182293658, 0.0027661422056214652], decimal=17) mvt = MVT(mu3, cov3, 3) assert_almost_equal(mvt.pdf(cov3), [0.000863777424247410, 0.001277510788307594, 0.004156314279452241], decimal=17) if __name__ == '__main__': import nose nose.runmodule(argv=['__main__','-vvs','-x'],#,'--pdb', '--pdb-failure'], exit=False) print('Done')
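# --- Illustrative sketch (not part of the original file) ---
# Direct usage of the functions exercised by the tests above, with the
# same equi-correlated matrix; the reference values in the comments are
# the R results quoted in test_mvn_mvt_1.

import numpy as np
from statsmodels.sandbox.distributions.multivariate import (
    mvstdtprob, mvstdnormcdf)

corr = np.asarray([[1.0, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]])
a = -1 * np.ones(3)
b = 3 * np.ones(3)

print(mvstdtprob(a, b, corr, 4))               # ~ 0.60414
print(mvstdnormcdf(a, b, corr, abseps=1e-5))   # ~ 0.673970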
40423206/2016fallcadp_hw
refs/heads/gh-pages
plugin/liquid_tags/test_flickr.py
278
from . import flickr
try:
    from unittest.mock import patch
except ImportError:
    from mock import patch
import os
import pytest
import re


PLUGIN_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(PLUGIN_DIR, 'test_data')


@pytest.mark.parametrize('input,expected', [
    ('18873146680 large "test 1"',
     dict(photo_id='18873146680', size='large', alt='test 1')),
    ('18873146680 large \'test 1\'',
     dict(photo_id='18873146680', size='large', alt='test 1')),
    ('18873143536360 medium "test number two"',
     dict(photo_id='18873143536360', size='medium', alt='test number two')),
    ('18873143536360 small "test number 3"',
     dict(photo_id='18873143536360', size='small', alt='test number 3')),
    ('18873143536360 "test 4"',
     dict(photo_id='18873143536360', size=None, alt='test 4')),
    ('18873143536360',
     dict(photo_id='18873143536360', size=None, alt=None)),
    ('123456 small',
     dict(photo_id='123456', size='small', alt=None))
])
def test_regex(input, expected):
    assert re.match(flickr.PARSE_SYNTAX, input).groupdict() == expected


@pytest.mark.parametrize('input,expected', [
    (['1', 'server1', '1', 'secret1', 'small'],
     'https://farm1.staticflickr.com/server1/1_secret1_n.jpg'),
    (['2', 'server2', '2', 'secret2', 'medium'],
     'https://farm2.staticflickr.com/server2/2_secret2_c.jpg'),
    (['3', 'server3', '3', 'secret3', 'large'],
     'https://farm3.staticflickr.com/server3/3_secret3_b.jpg')
])
def test_source_url(input, expected):
    assert flickr.source_url(input[0], input[1],
                             input[2], input[3], input[4]) == expected


@patch('liquid_tags.flickr.urlopen')
def test_generate_html(mock_urlopen):
    # mock the return to deliver the flickr.json file instead
    with open(TEST_DATA_DIR + '/flickr.json', 'rb') as f:
        mock_urlopen.return_value.read.return_value = f.read()

    attrs = dict(
        photo_id='1234567',
        size='large',
        alt='this is a test'
    )
    expected = ('<a href="https://www.flickr.com/photos/'
                'marvinxsteadfast/18841055371/">'
                '<img src="https://farm6.staticflickr.com/5552/1234567_'
                '17ac287217_b.jpg" alt="this is a test"></a>')

    assert flickr.generate_html(attrs, 'abcdef') == expected
msebire/intellij-community
refs/heads/master
python/testData/override/py3k_after.py
83
class A: def m(self): pass class B(A): def m(self): <selection>super().m()</selection>
BackupTheBerlios/pixies-svn
refs/heads/master
pixies/elements/inline.py
1
""" $Id$ $URL$ Copyright (C) 2004 Matteo Merli <matteo.merli@gmail.com> This code is licenced under the GPL. See LICENSE file. """ from pixies.utils import * from properties import * from link import * class Inline( Properties ): """ This represent the &lt;fo:inline&gt; element. """ def __init__(self, node): Properties.__init__(self) self.node = node self.attrs = Attrs( node ) self.common_fonts( self.attrs ) self.common_borders( self.attrs ) self.common_align( self.attrs ) self.common_text( self.attrs ) self.prefix = '' self.suffix = '' p = self.properties font = {} if 'font-family' in self.attrs: font['face'] = self.attrs['font-family'] if 'font-size' in p: font['font-size'] = p['font-size'] if 'color' in p: font['color'] = p['color'] if 'background-color' in p: font['bgcolor'] = p['background-color'] if font: self._add( 'font', font ) print font, self.getText() if self.attrs.get('font-weight') == 'bold': self._add('b') if self.attrs.get('font-style') in ('italic', 'oblique'): self._add('i') def getText(self): """ Return the processed text of the Inline object, resolving all the nested tags. """ self.text = '' for n in self.node.childNodes: if n.nodeType == n.TEXT_NODE: self.text += n.data elif n.nodeName == 'fo:inline': i = Inline( n ) self.text += i.getText() elif n.nodeName == 'fo:basic-link': l = Link( n ) self.text += l.getText() elif n.nodeName == 'fo:page-number': self.text += "#1#" ## print "\nAdded Inline Formatting:" self.text = self.prefix + escape_tags(self.text) + self.suffix print "###", self.text return unicode( trim_spaces( self.text ) ) def _add( self, key, attrs={} ): """ Add a xml-like tag to pass inline formatting specification to paragraph builder. """ self.prefix += '<%s' % key for k,v in attrs.items(): self.prefix += ' %s="%s"' % (k,v) self.prefix += '>' self.suffix = '</%s>%s' % ( key, self.suffix )
emfcamp/micropython
refs/heads/tilda-master
tests/extmod/ujson_dumps_float.py
61
try: import ujson as json except ImportError: import json print(json.dumps(1.2))
xingyepei/edx-platform
refs/heads/release
lms/djangoapps/lti_provider/tests/test_tasks.py
36
""" Tests for the LTI outcome service handlers, both in outcomes.py and in tasks.py """ import ddt from django.test import TestCase from mock import patch, MagicMock from student.tests.factories import UserFactory from lti_provider.models import GradedAssignment, LtiConsumer, OutcomeService import lti_provider.tasks as tasks from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator class BaseOutcomeTest(TestCase): """ Super type for tests of both the leaf and composite outcome celery tasks. """ def setUp(self): super(BaseOutcomeTest, self).setUp() self.course_key = CourseLocator( org='some_org', course='some_course', run='some_run' ) self.usage_key = BlockUsageLocator( course_key=self.course_key, block_type='problem', block_id='block_id' ) self.user = UserFactory.create() self.consumer = LtiConsumer( consumer_name='Lti Consumer Name', consumer_key='consumer_key', consumer_secret='consumer_secret', instance_guid='tool_instance_guid' ) self.consumer.save() outcome = OutcomeService( lis_outcome_service_url='http://example.com/service_url', lti_consumer=self.consumer ) outcome.save() self.assignment = GradedAssignment( user=self.user, course_key=self.course_key, usage_key=self.usage_key, outcome_service=outcome, lis_result_sourcedid='sourcedid', version_number=1, ) self.assignment.save() self.send_score_update_mock = self.setup_patch( 'lti_provider.outcomes.send_score_update', None ) def setup_patch(self, function_name, return_value): """ Patch a method with a given return value, and return the mock """ mock = MagicMock(return_value=return_value) new_patch = patch(function_name, new=mock) new_patch.start() self.addCleanup(new_patch.stop) return mock @ddt.ddt class SendLeafOutcomeTest(BaseOutcomeTest): """ Tests for the send_leaf_outcome method in tasks.py """ @ddt.data( (2.0, 2.0, 1.0), (2.0, 0.0, 0.0), (1, 2, 0.5), ) @ddt.unpack def test_outcome_with_score(self, earned, possible, expected): tasks.send_leaf_outcome( self.assignment.id, # pylint: disable=no-member earned, possible ) self.send_score_update_mock.assert_called_once_with(self.assignment, expected) @ddt.ddt class SendCompositeOutcomeTest(BaseOutcomeTest): """ Tests for the send_composite_outcome method in tasks.py """ def setUp(self): super(SendCompositeOutcomeTest, self).setUp() self.descriptor = MagicMock() self.descriptor.location = BlockUsageLocator( course_key=self.course_key, block_type='problem', block_id='problem', ) self.weighted_scores = MagicMock() self.weighted_scores_mock = self.setup_patch( 'lti_provider.tasks.get_weighted_scores', self.weighted_scores ) self.module_store = MagicMock() self.module_store.get_item = MagicMock(return_value=self.descriptor) self.check_result_mock = self.setup_patch( 'lti_provider.tasks.modulestore', self.module_store ) @ddt.data( (2.0, 2.0, 1.0), (2.0, 0.0, 0.0), (1, 2, 0.5), ) @ddt.unpack def test_outcome_with_score_score(self, earned, possible, expected): self.weighted_scores.score_for_module = MagicMock(return_value=(earned, possible)) tasks.send_composite_outcome( self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member ) self.send_score_update_mock.assert_called_once_with(self.assignment, expected) def test_outcome_with_outdated_version(self): self.assignment.version_number = 2 self.assignment.save() tasks.send_composite_outcome( self.user.id, unicode(self.course_key), self.assignment.id, 1 # pylint: disable=no-member ) self.assertEqual(self.weighted_scores_mock.call_count, 0)
rfguri/vimfiles
refs/heads/master
bundle/ycm/third_party/ycmd/ycmd/tests/clang/testdata/noflags/.ycm_extra_conf.py
7
def FlagsForFile( filename ): return { 'flags': [] }
vickenty/ookoobah
refs/heads/master
pyglet-c9188efc2e30/experimental/input/input.py
20
#!/usr/bin/env python ''' ''' __docformat__ = 'restructuredtext' __version__ = '$Id: $' import sys class InputException(Exception): pass class InputDeviceExclusiveException(InputException): pass if sys.platform == 'darwin': import osx get_devices = osx.get_devices elif sys.platform.startswith('linux'): import linux get_devices = linux.get_devices elif sys.platform in ('win32', 'cygwin'): import dinput get_devices = dinput.get_devices
andrewor14/spark
refs/heads/master
examples/src/main/python/ml/rformula_example.py
123
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function # $example on$ from pyspark.ml.feature import RFormula # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("RFormulaExample")\ .getOrCreate() # $example on$ dataset = spark.createDataFrame( [(7, "US", 18, 1.0), (8, "CA", 12, 0.0), (9, "NZ", 15, 0.0)], ["id", "country", "hour", "clicked"]) formula = RFormula( formula="clicked ~ country + hour", featuresCol="features", labelCol="label") output = formula.fit(dataset).transform(dataset) output.select("features", "label").show() # $example off$ spark.stop()
exercism/xpython
refs/heads/master
exercises/scrabble-score/scrabble_score.py
7
def score(word): pass
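# --- Illustrative sketch (not part of the original file) ---
# The stub above is the exercise's intended starting point and is left
# as-is.  One possible implementation, using the standard Scrabble
# letter values, would look like the following; score_example is a
# hypothetical name chosen so the stub is not shadowed.

_SCORES = {
    1: 'AEIOULNRST', 2: 'DG', 3: 'BCMP', 4: 'FHVWY',
    5: 'K', 8: 'JX', 10: 'QZ',
}
_LETTER_VALUES = {letter: value
                  for value, letters in _SCORES.items()
                  for letter in letters}

def score_example(word):
    return sum(_LETTER_VALUES[letter] for letter in word.upper())

assert score_example('cabbage') == 14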
siosio/intellij-community
refs/heads/master
python/testData/inspections/PyGlobalUndefinedInspection/trueNegative.py
75
__author__ = 'ktisha' bar = 1 def foo(): global bar print bar foo()
leereilly/django-1
refs/heads/master
django/contrib/gis/db/backends/spatialite/models.py
403
""" The GeometryColumns and SpatialRefSys models for the SpatiaLite backend. """ from django.db import models from django.contrib.gis.db.backends.base import SpatialRefSysMixin class GeometryColumns(models.Model): """ The 'geometry_columns' table from SpatiaLite. """ f_table_name = models.CharField(max_length=256) f_geometry_column = models.CharField(max_length=256) type = models.CharField(max_length=30) coord_dimension = models.IntegerField() srid = models.IntegerField(primary_key=True) spatial_index_enabled = models.IntegerField() class Meta: db_table = 'geometry_columns' managed = False @classmethod def table_name_col(cls): """ Returns the name of the metadata column used to store the the feature table name. """ return 'f_table_name' @classmethod def geom_col_name(cls): """ Returns the name of the metadata column used to store the the feature geometry column. """ return 'f_geometry_column' def __unicode__(self): return "%s.%s - %dD %s field (SRID: %d)" % \ (self.f_table_name, self.f_geometry_column, self.coord_dimension, self.type, self.srid) class SpatialRefSys(models.Model, SpatialRefSysMixin): """ The 'spatial_ref_sys' table from SpatiaLite. """ srid = models.IntegerField(primary_key=True) auth_name = models.CharField(max_length=256) auth_srid = models.IntegerField() ref_sys_name = models.CharField(max_length=256) proj4text = models.CharField(max_length=2048) @property def wkt(self): from django.contrib.gis.gdal import SpatialReference return SpatialReference(self.proj4text).wkt class Meta: db_table = 'spatial_ref_sys' managed = False
NickleDave/hybrid-vocal-classifier
refs/heads/main
tests/unit_test/test_features.py
1
""" test features.extract module """ import os import evfuncs import numpy as np import pytest import yaml import hvc.audiofileIO import hvc.features from hvc.utils import annotation from hvc.parse.ref_spect_params import refs_dict @pytest.fixture() def has_window_error(test_data_dir): filename = os.path.join( test_data_dir, os.path.normpath("cbins/window_error/gy6or6_baseline_220312_0901.106.cbin"), ) index = 19 return filename, index class TestFromFile: def test_song_w_nan(self, has_window_error, hvc_source_dir): """tests that features_arr[ind,:] == np.nan where ind is the row corresponding to a syllable from a song for which a spectrogram could not be generated, and so single-syllable features cannot be extracted from it """ with open( os.path.join(hvc_source_dir, os.path.normpath("parse/feature_groups.yml")) ) as ftr_grp_yaml: valid_feature_groups_dict = yaml.load(ftr_grp_yaml, Loader=yaml.FullLoader) spect_params = refs_dict["koumura"] segment_params = { "threshold": 1500, "min_syl_dur": 0.01, "min_silent_dur": 0.006, } svm_features = valid_feature_groups_dict["svm"] fe = hvc.features.extract.FeatureExtractor( spect_params=spect_params, segment_params=segment_params, feature_list=svm_features, ) filename, index = has_window_error annotation_dict = annotation.notmat_to_annot_dict(filename + ".not.mat") with pytest.warns(UserWarning): extract_dict = fe._from_file( filename=filename, file_format="evtaf", labels_to_use="iabcdefghjk", labels=annotation_dict["labels"], onsets_Hz=annotation_dict["onsets_Hz"], offsets_Hz=annotation_dict["offsets_Hz"], ) ftr_arr = extract_dict["features_arr"] assert np.alltrue(np.isnan(ftr_arr[19, :])) def test_cbin(self, hvc_source_dir, test_data_dir): """tests all features on a single .cbin file""" spect_params = refs_dict["tachibana"] segment_params = { "threshold": 1500, "min_syl_dur": 0.01, "min_silent_dur": 0.006, } with open( os.path.join(hvc_source_dir, os.path.normpath("parse/feature_groups.yml")) ) as ftr_grp_yaml: ftr_grps = yaml.load(ftr_grp_yaml, Loader=yaml.FullLoader) cbin = os.path.join( test_data_dir, os.path.normpath( "cbins/gy6or6/032412/" "gy6or6_baseline_240312_0811.1165.cbin" ), ) annotation_dict = annotation.notmat_to_annot_dict(cbin + ".not.mat") for feature_list in ( ftr_grps["knn"], ftr_grps["svm"], ["flatwindow"], ): fe = hvc.features.extract.FeatureExtractor( spect_params=spect_params, segment_params=segment_params, feature_list=feature_list, ) extract_dict = fe._from_file( cbin, file_format="evtaf", labels_to_use="iabcdefghjk", labels=annotation_dict["labels"], onsets_Hz=annotation_dict["onsets_Hz"], offsets_Hz=annotation_dict["offsets_Hz"], ) if "features_arr" in extract_dict: ftrs = extract_dict["features_arr"] feature_inds = extract_dict["feature_inds"] # _from_file should return an ndarray assert type(ftrs) == np.ndarray # and the number of columns should equal tbe number of feature indices # that _from_file determined there were (not necessarily equal to the # number of features in the list; some features such as the spectrogram # averaged over columns occupy several columns assert ftrs.shape[-1] == feature_inds.shape[-1] # however the **unique** number of features in feature indices should be # equal to the number of items in the feature list assert np.unique(feature_inds).shape[-1] == len(feature_list) elif "neuralnet_inputs_dict" in extract_dict: neuralnet_ftrs = extract_dict["neuralnet_inputs_dict"] assert type(neuralnet_ftrs) == dict else: raise ValueError( "neither features_arr or neuralnet_inputs_dict " "were returned 
by FeatureExtractor" )
ccomb/OpenUpgrade
refs/heads/master
addons/website/tests/test_ui.py
34
import openerp.tests class TestUi(openerp.tests.HttpCase): def test_01_public_homepage(self): self.phantom_js("/", "console.log('ok')", "openerp.website.snippet") def test_03_admin_homepage(self): self.phantom_js("/", "console.log('ok')", "openerp.website.editor", login='admin') def test_04_admin_tour_banner(self): self.phantom_js("/", "openerp.website.Tour.run('banner', 'test')", "openerp.website.Tour.tours.banner", login='admin') # vim:et:
sehlat57/slack-messages-files-graber
refs/heads/master
models.py
1
import re
import os
import requests
import time
import datetime

from slacker import Slacker
from utils import Utils

status_bar = Utils()


class SlackerGraber(object):
    def __init__(self, token):
        self.slacker = Slacker(token)

    def get_channels_id(self, chanls):
        """
        function gets the ids of the channels given in params,
        refer to the documentation below
        https://api.slack.com/methods/channels.list
        :param chanls: names of channels
        :return: ids of given channels
        """
        channels = self.slacker.channels.list().body
        ids = {channel['name']: channel['id']
               for channel in channels['channels']
               if channel['name'] in chanls}
        if ids:
            return ids

    def get_users_id(self, users):
        """
        function gets the ids of the users given in params,
        refer to the documentation below
        https://api.slack.com/methods/users.list
        :param users: usernames
        :return: ids of given users
        """
        channel_users = self.slacker.users.list().body
        ids = {user['id']: user['name']
               for user in channel_users['members']
               if user['name'] in users}
        if ids:
            return ids

    def get_messages(self, channel, users, timestamp, count=1000):
        """
        function retrieves messages of users from the channel history,
        refer to the documentation below
        https://api.slack.com/methods/channels.history
        :param channel: slack channel (id)
        :param users: slack users (ids)
        :param timestamp: earliest message to retrieve
        :param count: max number of messages to retrieve
        :return: list of messages except file-messages
        """
        def timestamp_to_date(stamp):
            """
            function converts a timestamp to a "humanized" date
            :param stamp: timestamp
            :return: formatted date
            """
            return datetime.datetime.fromtimestamp(
                int(float(stamp))).strftime('%d.%m.%Y')

        slack_messages = self.slacker.channels.history(channel=channel,
                                                       oldest=timestamp,
                                                       count=count).body
        messages = ['{} - {} - {}'.format(
            timestamp_to_date(message['ts']), users[message['user']],
            message['text']) for message in slack_messages['messages']
            if message.get('user') in users
            and 'a file:' not in message.get('text')]
        if messages:
            return messages

    def get_file_links(self, channel, user, timestamp, count=1000):
        """
        function retrieves links, extensions and filenames of files
        from the given channel and user, refer to the documentation below
        https://api.slack.com/methods/files.list
        :param channel: slack channel (id)
        :param user: slack user (id)
        :param timestamp: earliest file to retrieve
        :param count: max number of files to retrieve
        :return: list with filename, url link of the file and
            extension of the file
        """
        slack_files = self.slacker.files.list(user=user,
                                              ts_from=timestamp,
                                              count=count).body
        files = [[file['name'].split('.')[0],
                  file.setdefault('url_private_download', None),
                  file['filetype']] for file in slack_files['files']
                 if channel in file['channels']]
        if files:
            return files

    @staticmethod
    def date_to_timestamp(date):
        """
        function converts a "humanized" date to a timestamp
        :param date: date
        :return: timestamp
        """
        regex = re.compile(r'\W')
        _date = regex.sub('.', date)
        return time.mktime(datetime.datetime.strptime(
            _date, '%d.%m.%Y').timetuple())


class Downloader(object):
    def __init__(self, path, filename, url, extension, headers):
        # initialize unconditionally so error reporting works even when
        # the request below fails
        self.filename = filename
        self.errors = []
        try:
            self.url = requests.get(url, headers=headers, stream=True)
            self.path = path
            self.extension = extension
        except ValueError:
            # requests raises ValueError subclasses (e.g. MissingSchema)
            # for malformed URLs; mark the download as unavailable
            self.url = None

    def download_file(self, chunk_size=1024 * 8, status=None):
        """
        function downloads a file from slack channels
        :param chunk_size: size of chunk to download
        :param status: if True, progress of the file download is printed
            to the terminal
        :return: None if an error occurs, 1 if the download was successful
        """
        if self.url is None:
            error = 'unable to download file {}'.format(self.filename)
            print(error)
            self.errors.append(error)
            return
        if not os.path.isdir(self.path):
            raise OSError('invalid path, does not exist')
        if self.extension == 'space':
            self.extension = 'txt'
        filename = '{}.{}'.format(self.filename, self.extension)
        path = os.path.join(self.path, filename)
        if os.path.isfile(path):
            error = '{} already exists'.format(self.filename)
            print(error)
            self.errors.append(error)
            return
        file_size = self.read_file_size(self.url)
        if file_size is None:
            error = 'unable to download file {},' \
                    ' unable to get file size'.format(self.filename)
            print(error)
            self.errors.append(error)
            return
        start = time.time()
        bytes_received = 0
        try:
            with open(path, 'wb') as result_file:
                for chunk in self.url.iter_content(chunk_size):
                    bytes_received += len(chunk)
                    result_file.write(chunk)
                    if status:
                        status_bar.progress_bar(completion=bytes_received,
                                                total=int(file_size),
                                                start_time=start)
        except KeyboardInterrupt:
            os.remove(path)
            raise KeyboardInterrupt('download interrupted by user')
        return 1

    @staticmethod
    def read_file_size(response):
        meta_data = dict(response.headers.items())
        return meta_data.get('Content-Length') or \
            meta_data.get('content-length')


class MessagesWriter(object):
    def __init__(self, path, channel_name, messages):
        self.path = path
        self.messages = messages
        self.channel_name = channel_name

    def write_messages(self, timestamp=None):
        """
        function writes text from a list to a file
        :param timestamp: if truthy, the current timestamp is prefixed
            to the filename
        :return: None
        """
        if not os.path.isdir(self.path):
            raise OSError('invalid path, does not exist')
        if timestamp:
            timestamp = time.time()
            dest_path = os.path.join(self.path, str(timestamp) + ' ' +
                                     self.channel_name + ' messages.txt')
        else:
            dest_path = os.path.join(self.path,
                                     self.channel_name + ' messages.txt')
        with open(dest_path, 'w') as dest_file:
            dest_file.writelines(self.messages)
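A minimal usage sketch for the classes above, assuming a valid Slack API token; the token, channel name, username, date, and output path are placeholders, not values from the original project:

# Hypothetical inputs; substitute a real token, channel, and user.
grabber = SlackerGraber('xoxp-placeholder-token')
channels = grabber.get_channels_id(['general'])
users = grabber.get_users_id(['alice'])
since = SlackerGraber.date_to_timestamp('01.01.2016')
messages = grabber.get_messages(channels['general'], users, since)
if messages:
    MessagesWriter('/tmp', 'general', messages).write_messages()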
mateuszdargacz/lux_dom
refs/heads/master
lux_dom/urls.py
1
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import TemplateView
from django.conf.urls.static import static

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    url(r'^', include('apps.core.urls', namespace="core")),
    url(r'^zohoverify/verifyforzoho.html/?$',
        TemplateView.as_view(template_name='zoho.html'), name='gallery'),
    url(r'^admin/', include(admin.site.urls)),
)

# Uncomment the next line to serve media files in dev.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
adblockplus/gyp
refs/heads/master
test/same-source-file-name/gyptest-pass-shared.py
51
#!/usr/bin/env python

# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Checks that gyp does not fail on shared_library targets which have
several files with the same basename.
"""

import TestGyp

test = TestGyp.TestGyp()

test.run_gyp('double-shared.gyp', chdir='src')

test.pass_test()
quentin-xia/Maticv
refs/heads/master
common/app.py
1
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os, socket, tempfile, time

try:
    import http.client as httplib
    from urllib import request as url_request
except ImportError:
    # above is available in py3+, below is py2.7
    import httplib
    import urllib as url_request


# Configure the app
def configure_app(app):
    if app[:4].lower() == "http":
        tmpDir = tempfile.mkdtemp()
        randNum = str(time.time()).replace(".", "")
        tmpPath = os.path.join(tmpDir, randNum + ".apk")
        configure_downloaded_app(app, tmpPath)
        return tmpPath
    else:
        configure_local_app(app)
        return app


# Local app
def configure_local_app(app):
    ext = app[-4:].lower()
    if ext == ".apk":
        if not os.path.exists(app):
            msg = "App does not exist: %s" % app
            raise Exception(msg)
    else:
        msg = "Using local app, but it didn't end in .apk"
        raise Exception(msg)


# Downloaded app
def configure_downloaded_app(app, path):
    ext = app[-4:].lower()
    if ext == ".apk":
        download_app(app, path)
        if os.path.getsize(path) < 1024:
            msg = "Failed downloading app from app URL(%s)" % app
            raise Exception(msg)
    else:
        msg = "App URL(%s) didn't seem to point to a .apk file" % app
        raise Exception(msg)


# Download
def download_app(app, path):
    try:
        # set urllib timeout
        socket.setdefaulttimeout(600)
        url_request.urlretrieve(app, path)
    except Exception:
        msg = "Failed downloading app from app URL(%s)" % app
        raise Exception(msg)
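A hedged example of how configure_app might be invoked; the URL below is illustrative only, not a real build server:

apk_path = configure_app("http://example.com/builds/app-debug.apk")  # hypothetical URL
print("APK ready at %s" % apk_path)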
Foxfanmedium/python_training
refs/heads/master
Book_courses/Berry_P/chapter_4/mymodules/setup.py
1
from setuptools import setup

setup(
    name='vsearch',
    version='1.0',
    description='The Head First Python Search Tools',
    author='HF Python 2e',
    author_email='hfpy2e@gmail.com',
    url='headfirstlabs.com',
    py_modules=['vsearch'],
)
rustychris/freebird
refs/heads/master
software/freebird/test_ellipse_fit.py
1
# This script was clearly written for an interactive pylab session; the
# explicit imports below (an addition) make it runnable standalone.
from numpy import var, array
import numpy as np
from matplotlib.pyplot import figure, plot, axis

import compass_cal
reload(compass_cal)

fn = "../../../../../ct_river/data/2014-05/tioga_winch/freebird/20140514/compass-0x5002930054E40942-20140514T0643.dat.npz"

cf = compass_cal.CalibrationFit(fn)
cf.fit()
cf.write_cal('compass.cal', overwrite=True)

samples = cf.raw_data['samples']

figure(1).clf()
plot(samples[:, 0], samples[:, 1], 'r.')
plot(samples[:, 1], samples[:, 2], 'g.')
plot(samples[:, 0], samples[:, 2], 'b.')
axis('equal')

##

# Brute force fitting for axis-aligned ellipse.
# Find the center:
# the center is defined by the (x,y,z) point which minimizes variance in
# the distance from the center.
# fitting all five parameters at once was considerably slower
from scipy.optimize import fmin

def cost_center(xyz):
    return var(np.sum((samples - xyz)**2, axis=1))

center = fmin(cost_center, [1, 1, 1])

csamples = samples - center

figure(1).clf()
plot(csamples[:, 0], csamples[:, 1], 'r.')
plot(csamples[:, 1], csamples[:, 2], 'g.')
plot(csamples[:, 0], csamples[:, 2], 'b.')
axis('equal')

# And the scale factors:
def cost_scale(xy):
    # overall scale doesn't matter, so really just two degrees of freedom
    # and don't allow inverting any axis
    xyz = array([abs(xy[0]), abs(xy[1]), 1.0])
    #
    return var(np.sum((csamples * xyz)**2, axis=1))

scales = abs(fmin(cost_scale, [1, 1]))
scales = array([scales[0], scales[1], 1.0])

adjusted = csamples * scales

if 0:
    from mayavi import mlab
    mfig = mlab.figure()
    mlab.clf()
    mlab.points3d(adjusted[:, 0], adjusted[:, 1], adjusted[:, 2])
    mlab.points3d([0], [0], [0], color=(1, 0, 0), scale_factor=20)
SOM-st/RPySOM
refs/heads/master
src/som/compiler/symbol.py
1
# Symbol is a 'lightweight' enum, in Python 3.4, we could use Enum as superclass
class Symbol(object):
    NONE             = -1
    Integer          = 0
    Double           = 1
    Not              = 2
    And              = 3
    Or               = 4
    Star             = 5
    Div              = 6
    Mod              = 7
    Plus             = 8
    Minus            = 9
    Equal            = 10
    More             = 11
    Less             = 12
    Comma            = 13
    At               = 14
    Per              = 15
    NewBlock         = 16
    EndBlock         = 17
    Colon            = 18
    Period           = 19
    Exit             = 20
    Assign           = 21
    NewTerm          = 22
    EndTerm          = 23
    Pound            = 24
    Primitive        = 25
    Separator        = 26
    STString         = 27
    Identifier       = 28
    Keyword          = 29
    KeywordSequence  = 30
    OperatorSequence = 31


def _sorted_symbols(cls):
    "NOT_RPYTHON"
    """This function is only called a single time, at load time of this module.
    For RPython, this means, during translation of the module.
    """
    return [key for value, key in
            sorted([(value, key) for key, value in cls.__dict__.items()
                    if isinstance(value, int)])]

_symbols = _sorted_symbols(Symbol)


def symbol_as_str(symbol):
    index = symbol + 1
    # use >= here: index == len(_symbols) would otherwise raise an
    # IndexError instead of the intended ValueError
    if index >= len(_symbols):
        raise ValueError('No Symbol defined for the value %d.' % symbol)
    else:
        return _symbols[index]
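A quick sanity check of the mapping above; because NONE is -1, every symbol's list index is its value plus one:

assert symbol_as_str(Symbol.NONE) == 'NONE'
assert symbol_as_str(Symbol.Plus) == 'Plus'
assert symbol_as_str(Symbol.OperatorSequence) == 'OperatorSequence'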
Tekcafe/Test-kernel
refs/heads/master
Documentation/target/tcm_mod_builder.py
4981
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse

tcm_dir = ""

fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""


def tcm_mod_err(msg):
    print msg
    sys.exit(1)


def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    if os.path.isdir(fabric_mod_dir_var) == True:
        return 1

    print "Creating fabric_mod_dir: " + fabric_mod_dir_var
    ret = os.mkdir(fabric_mod_dir_var)
    if ret:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)

    return


def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "\t/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += "\tu64 nport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += "\tchar nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "\tstruct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* FC lport target portal group tag for TCM */\n"
    buf += "\tu16 lport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += "\tstruct " + fabric_mod_name + "_lport *lport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += "\t/* SCSI protocol the lport is providing */\n"
    buf += "\tu8 lport_proto_id;\n"
    buf += "\t/* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += "\tu64 lport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for FC Target Lport */\n"
    buf += "\tchar lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += "\tstruct se_wwn lport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"

    return


def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "\t/* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += "\tu64 iport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += "\tchar iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "\tstruct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* SAS port target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* SCSI protocol the tport is providing */\n"
    buf += "\tu8 tport_proto_id;\n"
    buf += "\t/* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += "\tu64 tport_wwpn;\n"
    buf += "\t/* ASCII formatted WWPN for SAS Target port */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"

    return


def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "\t/* ASCII formatted InitiatorName */\n"
    buf += "\tchar iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "\tstruct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "\t/* iSCSI target portal group tag for TCM */\n"
    buf += "\tu16 tport_tpgt;\n"
    buf += "\t/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "\tstruct " + fabric_mod_name + "_tport *tport;\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "\tstruct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "\t/* SCSI protocol the tport is providing */\n"
    buf += "\tu8 tport_proto_id;\n"
    buf += "\t/* ASCII formatted TargetName for IQN */\n"
    buf += "\tchar tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "\t/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "\tstruct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"

    return


def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    return


def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += "\tstruct se_portal_group *se_tpg,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += "\tstruct " + fabric_mod_name + "_nacl *nacl;\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\tu64 wwpn = 0;\n"

    buf += "\tu32 nexus_depth;\n\n"
    buf += "\t/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL); */\n"
    buf += "\tse_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += "\tif (!se_nacl_new)\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += "\tnexus_depth = 1;\n"
    buf += "\t/*\n"
    buf += "\t * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += "\t * when converting a NodeACL from demo mode -> explict\n"
    buf += "\t */\n"
    buf += "\tse_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += "\t\t\t\tname, nexus_depth);\n"
    buf += "\tif (IS_ERR(se_nacl)) {\n"
    buf += "\t\t" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += "\t\treturn se_nacl;\n"
    buf += "\t}\n"
    buf += "\t/*\n"
    buf += "\t * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += "\t */\n"
    buf += "\tnacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\tnacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"

    buf += "\t/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += "\treturn se_nacl;\n"
    buf += "}\n\n"

    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += "\t\t\t\tstruct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += "\tcore_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += "\tkfree(nacl);\n"
    buf += "}\n\n"

    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += "\tstruct se_wwn *wwn,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "\t\t\tstruct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += "\tstruct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += "\tunsigned long tpgt;\n"
    buf += "\tint ret;\n\n"
    buf += "\tif (strstr(name, \"tpgt_\") != name)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL);\n"
    buf += "\tif (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL);\n\n"
    buf += "\ttpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += "\tif (!tpg) {\n"
    buf += "\t\tprintk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "\t}\n"
    buf += "\ttpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += "\ttpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += "\tret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += "\t\t\t\t&tpg->se_tpg, (void *)tpg,\n"
    buf += "\t\t\t\tTRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += "\tif (ret < 0) {\n"
    buf += "\t\tkfree(tpg);\n"
    buf += "\t\treturn NULL;\n"
    buf += "\t}\n"
    buf += "\treturn &tpg->se_tpg;\n"
    buf += "}\n\n"

    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += "\tcore_tpg_deregister(se_tpg);\n"
    buf += "\tkfree(tpg);\n"
    buf += "}\n\n"

    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += "\tstruct target_fabric_configfs *tf,\n"
    buf += "\tstruct config_group *group,\n"
    buf += "\tconst char *name)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\tu64 wwpn = 0;\n\n"

    buf += "\t/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += "\t\treturn ERR_PTR(-EINVAL); */\n\n"
    buf += "\t" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += "\tif (!" + fabric_mod_port + ") {\n"
    buf += "\t\tprintk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += "\t\treturn ERR_PTR(-ENOMEM);\n"
    buf += "\t}\n"

    if proto_ident == "FC" or proto_ident == "SAS":
        buf += "\t" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"

    buf += "\t/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += "\treturn &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"

    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += "\t\t\t\tstruct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += "\tkfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"

    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += "\tstruct target_fabric_configfs *tf,\n"
    buf += "\tchar *page)\n"
    buf += "{\n"
    buf += "\treturn sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += "\t\t\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += "\t\tutsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"

    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += "\t&" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += "\tNULL,\n"
    buf += "};\n\n"

    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += "\t.get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += "\t.get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += "\t.tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += "\t.tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += "\t.tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += "\t.tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += "\t.tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += "\t.tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += "\t.tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += "\t.tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += "\t.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += "\t.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += "\t.tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += "\t.tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += "\t.tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += "\t.release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += "\t.shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += "\t.close_session = " + fabric_mod_name + "_close_session,\n"
    buf += "\t.stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += "\t.fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += "\t.sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += "\t.sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += "\t.sess_get_initiator_sid = NULL,\n"
    buf += "\t.write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += "\t.write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += "\t.set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += "\t.get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += "\t.get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += "\t.queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += "\t.queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += "\t.queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += "\t.get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
    buf += "\t.set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
    buf += "\t.is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += "\t/*\n"
    buf += "\t * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += "\t */\n"
    buf += "\t.fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += "\t.fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += "\t.fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += "\t.fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += "\t.fabric_post_link = NULL,\n"
    buf += "\t.fabric_pre_unlink = NULL,\n"
    buf += "\t.fabric_make_np = NULL,\n"
    buf += "\t.fabric_drop_np = NULL,\n"
    buf += "\t.fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += "\t.fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"

    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += "\tstruct target_fabric_configfs *fabric;\n"
    buf += "\tint ret;\n\n"
    buf += "\tprintk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += "\t\t\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += "\t\tutsname()->machine);\n"
    buf += "\t/*\n"
    buf += "\t * Register the top level struct config_item_type with TCM core\n"
    buf += "\t */\n"
    buf += "\tfabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += "\tif (IS_ERR(fabric)) {\n"
    buf += "\t\tprintk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += "\t\treturn PTR_ERR(fabric);\n"
    buf += "\t}\n"
    buf += "\t/*\n"
    buf += "\t * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += "\t */\n"
    buf += "\tfabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += "\t/*\n"
    buf += "\t * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += "\t */\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += "\tTF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += "\t/*\n"
    buf += "\t * Register the fabric for use within TCM\n"
    buf += "\t */\n"
    buf += "\tret = target_fabric_configfs_register(fabric);\n"
    buf += "\tif (ret < 0) {\n"
    buf += "\t\tprintk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += "\t\t\t\t\" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += "\t\treturn ret;\n"
    buf += "\t}\n"
    buf += "\t/*\n"
    buf += "\t * Setup our local pointer to *fabric\n"
    buf += "\t */\n"
    buf += "\t" + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += "\tprintk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "\treturn 0;\n"
    buf += "};\n\n"

    buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += "\tif (!" + fabric_mod_name + "_fabric_configfs)\n"
    buf += "\t\treturn;\n\n"
    buf += "\ttarget_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += "\t" + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += "\tprintk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"

    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += "\tint ret;\n\n"
    buf += "\tret = " + fabric_mod_name + "_register_configfs();\n"
    buf += "\tif (ret < 0)\n"
    buf += "\t\treturn ret;\n\n"
    buf += "\treturn 0;\n"
    buf += "};\n\n"

    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += "\t" + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"

    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    return


def tcm_mod_scan_fabric_ops(tcm_dir):
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;

    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue

        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue

            fabric_ops.append(line.rstrip())
            continue

        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue

        fabric_ops.append(line.rstrip())

    p.close()
    return


def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    buf = ""
    bufi = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi

    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)

    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi.h>\n"
    buf += "#include <scsi/scsi_host.h>\n"
    buf += "#include <scsi/scsi_device.h>\n"
    buf += "#include <scsi/scsi_cmnd.h>\n"
    buf += "#include <scsi/libfc.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_configfs.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "\treturn 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"

    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += "\treturn 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

    total_fabric_ops = len(fabric_ops)
    i = 0

    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo

        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += "\treturn \"" + fabric_mod_name[4:] + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue

        if re.search('get_fabric_proto_ident', fo):
            buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "\tu8 proto_id;\n\n"
            buf += "\tswitch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "\tcase SCSI_PROTOCOL_FCP:\n"
                buf += "\tdefault:\n"
                buf += "\t\tproto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                buf += "\t\tbreak;\n"
            elif proto_ident == "SAS":
                buf += "\tcase SCSI_PROTOCOL_SAS:\n"
                buf += "\tdefault:\n"
                buf += "\t\tproto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                buf += "\t\tbreak;\n"
            elif proto_ident == "iSCSI":
                buf += "\tcase SCSI_PROTOCOL_ISCSI:\n"
                buf += "\tdefault:\n"
                buf += "\t\tproto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                buf += "\t\tbreak;\n"

            buf += "\t}\n\n"
            buf += "\treturn proto_id;\n"
            buf += "}\n\n"
            bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"

        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += "\treturn &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"

        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\treturn tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"

        if re.search('get_default_depth', fo):
            buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\treturn 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"

        if re.search('get_pr_transport_id\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
            buf += "\tstruct se_portal_group *se_tpg,\n"
            buf += "\tstruct se_node_acl *se_nacl,\n"
            buf += "\tstruct t10_pr_registration *pr_reg,\n"
            buf += "\tint *format_code,\n"
            buf += "\tunsigned char *buf)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "\tint ret = 0;\n\n"
            buf += "\tswitch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "\tcase SCSI_PROTOCOL_FCP:\n"
                buf += "\tdefault:\n"
                buf += "\t\tret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "\t\t\t\t\tformat_code, buf);\n"
                buf += "\t\tbreak;\n"
            elif proto_ident == "SAS":
                buf += "\tcase SCSI_PROTOCOL_SAS:\n"
                buf += "\tdefault:\n"
                buf += "\t\tret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "\t\t\t\t\tformat_code, buf);\n"
                buf += "\t\tbreak;\n"
            elif proto_ident == "iSCSI":
                buf += "\tcase SCSI_PROTOCOL_ISCSI:\n"
                buf += "\tdefault:\n"
                buf += "\t\tret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += "\t\t\t\t\tformat_code, buf);\n"
                buf += "\t\tbreak;\n"

            buf += "\t}\n\n"
            buf += "\treturn ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
            bufi += "\t\t\tstruct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += "\t\t\tint *, unsigned char *);\n"

        if re.search('get_pr_transport_id_len\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
            buf += "\tstruct se_portal_group *se_tpg,\n"
            buf += "\tstruct se_node_acl *se_nacl,\n"
            buf += "\tstruct t10_pr_registration *pr_reg,\n"
            buf += "\tint *format_code)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "\tint ret = 0;\n\n"
            buf += "\tswitch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "\tcase SCSI_PROTOCOL_FCP:\n"
                buf += "\tdefault:\n"
                buf += "\t\tret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "\t\t\t\t\tformat_code);\n"
                buf += "\t\tbreak;\n"
            elif proto_ident == "SAS":
                buf += "\tcase SCSI_PROTOCOL_SAS:\n"
                buf += "\tdefault:\n"
                buf += "\t\tret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "\t\t\t\t\tformat_code);\n"
                buf += "\t\tbreak;\n"
            elif proto_ident == "iSCSI":
                buf += "\tcase SCSI_PROTOCOL_ISCSI:\n"
                buf += "\tdefault:\n"
                buf += "\t\tret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += "\t\t\t\t\tformat_code);\n"
                buf += "\t\tbreak;\n"

            buf += "\t}\n\n"
            buf += "\treturn ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
            bufi += "\t\t\tstruct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += "\t\t\tint *);\n"

        if re.search('parse_pr_out_transport_id\)\(', fo):
            buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
            buf += "\tstruct se_portal_group *se_tpg,\n"
            buf += "\tconst char *buf,\n"
            buf += "\tu32 *out_tid_len,\n"
            buf += "\tchar **port_nexus_ptr)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += "\t\t\t\tstruct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += "\tstruct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += "\tchar *tid = NULL;\n\n"
            buf += "\tswitch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += "\tcase SCSI_PROTOCOL_FCP:\n"
                buf += "\tdefault:\n"
                buf += "\t\ttid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "\t\t\t\t\tport_nexus_ptr);\n"
            elif proto_ident == "SAS":
                buf += "\tcase SCSI_PROTOCOL_SAS:\n"
                buf += "\tdefault:\n"
                buf += "\t\ttid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "\t\t\t\t\tport_nexus_ptr);\n"
            elif proto_ident == "iSCSI":
                buf += "\tcase SCSI_PROTOCOL_ISCSI:\n"
                buf += "\tdefault:\n"
                buf += "\t\ttid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += "\t\t\t\t\tport_nexus_ptr);\n"

            buf += "\t}\n\n"
            buf += "\treturn tid;\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
            bufi += "\t\t\tconst char *, u32 *, char **);\n"

        if re.search('alloc_fabric_acl\)\(', fo):
            buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_nacl *nacl;\n\n"
            buf += "\tnacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
            buf += "\tif (!nacl) {\n"
            buf += "\t\tprintk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
            buf += "\t\treturn NULL;\n"
            buf += "\t}\n\n"
            buf += "\treturn &nacl->se_node_acl;\n"
            buf += "}\n\n"
            bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"

        if re.search('release_fabric_acl\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
            buf += "\tstruct se_portal_group *se_tpg,\n"
            buf += "\tstruct se_node_acl *se_nacl)\n"
            buf += "{\n"
            buf += "\tstruct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
            buf += "\t\t\tstruct " + fabric_mod_name + "_nacl, se_node_acl);\n"
            buf += "\tkfree(nacl);\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
            bufi += "\t\t\tstruct se_node_acl *);\n"

        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += "\treturn 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"

        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"

        if re.search('shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"

        if re.search('close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"

        if re.search('stop_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"

        if re.search('fall_back_to_erl0\)\(', fo):
            buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"

        if re.search('sess_logged_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"

        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"

        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"

        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"

        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += "\treturn;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"

        if re.search('get_task_tag\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"

        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"

        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"

        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"

        if re.search('queue_tm_rsp\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"

        if re.search('get_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"

        if re.search('set_fabric_sense_len\)\(', fo):
            buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"

        if re.search('is_state_remove\)\(', fo):
            buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += "\treturn 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)

    pi.close()
    return


def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    buf = ""
    f = fabric_mod_dir_var + "/Makefile"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += "\t\t\t" + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return


def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    buf = ""
    f = fabric_mod_dir_var + "/Kconfig"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += "\ttristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += "\tdepends on TARGET_CORE && CONFIGFS_FS\n"
    buf += "\tdefault n\n"
    buf += "\t---help---\n"
    buf += "\tSay Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"

    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return


def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ")	+= " + fabric_mod_name.lower() + "/\n"
    kbuild = tcm_dir + "/drivers/target/Makefile"

    f = open(kbuild, 'a')
    f.write(buf)
    f.close()
    return


def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig = tcm_dir + "/drivers/target/Kconfig"

    f = open(kconfig, 'a')
    f.write(buf)
    f.close()
    return


def main(modname, proto_ident):
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"

    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir

    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident

    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)

    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)

    return

parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
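A typical invocation of the generator above, run from Documentation/target/ inside a kernel tree; the module name is illustrative and should carry the tcm_ prefix, since fabric_mod_name[4:] strips those four characters to form the fabric name:

# python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI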
Andrey-Pavlov/robomongo
refs/heads/master
src/third-party/mongodb/src/third_party/v8/tools/disasm.py
25
#!/usr/bin/env python
#
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
import re
import subprocess
import tempfile


# Avoid using the slow (google-specific) wrapper around objdump.
OBJDUMP_BIN = "/usr/bin/objdump"
if not os.path.exists(OBJDUMP_BIN):
  OBJDUMP_BIN = "objdump"


_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"]

_DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$")
_DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(\S.*)")

# Keys must match constants in Logger::LogCodeInfo.
_ARCH_MAP = {
  "ia32": "-m i386",
  "x64": "-m i386 -M x86-64",
  "arm": "-m arm",  # Not supported by our objdump build.
  "mips": "-m mips"  # Not supported by our objdump build.
}


def GetDisasmLines(filename, offset, size, arch, inplace):
  tmp_name = None
  if not inplace:
    # Create a temporary file containing a copy of the code.
    assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch
    arch_flags = _ARCH_MAP[arch]
    tmp_name = tempfile.mktemp(".v8code")
    command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \
              "%s %s -D -b binary %s %s" % (
                filename, tmp_name, size, offset,
                OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags,
                tmp_name)
  else:
    command = "%s %s --start-address=%d --stop-address=%d -d %s " % (
        OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS),
        offset,
        offset + size,
        filename)
  process = subprocess.Popen(command,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
  out, err = process.communicate()
  lines = out.split("\n")
  header_line = 0
  for i, line in enumerate(lines):
    if _DISASM_HEADER_RE.match(line):
      header_line = i
      break
  if tmp_name:
    os.unlink(tmp_name)
  split_lines = []
  for line in lines[header_line + 1:]:
    match = _DISASM_LINE_RE.match(line)
    if match:
      line_address = int(match.group(1), 16)
      split_lines.append((line_address, match.group(2)))
  return split_lines
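An illustrative call to the helper above; the file name and offsets are placeholders, and inplace=True keeps objdump pointed at the original file rather than a temporary copy:

for address, text in GetDisasmLines("snapshot.bin", 0x0, 64, "ia32", True):
  print "%x: %s" % (address, text)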
keithrob/openshift-node4
refs/heads/master
bin/node/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypsh.py
2779
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""gypsh output module

gypsh is a GYP shell.  It's not really a generator per se.  All it does is
fire up an interactive Python session with a few local variables set to the
variables passed to the generator.  Like gypd, it's intended as a debugging
aid, to facilitate the exploration of .gyp structures after being processed
by the input module.

The expected usage is "gyp -f gypsh -D OS=desired_os".
"""

import code
import sys


# All of this stuff about generator variables was lovingly ripped from
# gypd.py.  That module has a much better description of what's going on
# and why.
_generator_identity_variables = [
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
]

generator_default_variables = {
}

for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v


def GenerateOutput(target_list, target_dicts, data, params):
  locals = {
      'target_list':  target_list,
      'target_dicts': target_dicts,
      'data':         data,
  }

  # Use a banner that looks like the stock Python one and like what
  # code.interact uses by default, but tack on something to indicate what
  # locals are available, and identify gypsh.
  banner='Python %s on %s\nlocals.keys() = %s\ngypsh' % \
         (sys.version, sys.platform, repr(sorted(locals.keys())))

  code.interact(banner, local=locals)
gfreed/android_external_chromium-org
refs/heads/android-4.4
tools/perf/metrics/smoothness_unittest.py
23
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import unittest

from metrics import smoothness
from metrics.gpu_rendering_stats import GpuRenderingStats
from telemetry.page import page
from telemetry.page.page_measurement_results import PageMeasurementResults


class SmoothnessMetricsUnitTest(unittest.TestCase):
  def testCalcResultsRealRenderStats(self):
    mock_rendering_stats_deltas = {
        'totalTimeInSeconds': 1.0,
        'numFramesSentToScreen': 100,
        'droppedFrameCount': 20,
        'numImplThreadScrolls': 50,
        'numMainThreadScrolls': 50,
        'numLayersDrawn': 240,
        'numMissingTiles': 10,
        'textureUploadCount': 120,
        'totalTextureUploadTimeInSeconds': 1.2,
        'totalCommitCount': 130,
        'totalCommitTimeInSeconds': 1.3,
        'totalDeferredImageDecodeCount': 140,
        'totalDeferredImageDecodeTimeInSeconds': 1.4,
        'totalDeferredImageCacheHitCount': 30,
        'totalImageGatheringCount': 150,
        'totalImageGatheringTimeInSeconds': 1.5,
        'totalTilesAnalyzed': 160,
        'totalTileAnalysisTimeInSeconds': 1.6,
        'solidColorTilesAnalyzed': 40,
        'inputEventCount': 170,
        'totalInputLatency': 1.7,
        'touchUICount': 180,
        'totalTouchUILatency': 1.8,
        'touchAckedCount': 190,
        'totalTouchAckedLatency': 1.9,
        'scrollUpdateCount': 200,
        'totalScrollUpdateLatency': 2.0}
    stats = GpuRenderingStats(mock_rendering_stats_deltas)

    res = PageMeasurementResults()
    res.WillMeasurePage(page.Page('http://foo.com/', None))
    smoothness.CalcResults(stats, res)
    res.DidMeasurePage()

    # Scroll Results
    self.assertAlmostEquals(
        1.0 / 100.0 * 1000.0,
        res.page_results[0]['mean_frame_time'].value, 2)
    self.assertAlmostEquals(
        20.0 / 100.0 * 100.0,
        res.page_results[0]['dropped_percent'].value)
    self.assertAlmostEquals(
        50.0 / (50.0 + 50.0) * 100.0,
        res.page_results[0]['percent_impl_scrolled'].value)
    self.assertAlmostEquals(
        240.0 / 100.0,
        res.page_results[0]['average_num_layers_drawn'].value)
    self.assertAlmostEquals(
        10.0 / 100.0,
        res.page_results[0]['average_num_missing_tiles'].value)

    # Texture Upload Results
    self.assertAlmostEquals(
        1.3 / 130.0 * 1000.0,
        res.page_results[0]['average_commit_time'].value)
    self.assertEquals(
        120,
        res.page_results[0]['texture_upload_count'].value)
    self.assertEquals(
        1.2,
        res.page_results[0]['total_texture_upload_time'].value)

    # Image Decoding Results
    self.assertEquals(
        140,
        res.page_results[0]['total_deferred_image_decode_count'].value)
    self.assertEquals(
        30,
        res.page_results[0]['total_image_cache_hit_count'].value)
    self.assertAlmostEquals(
        1.5 / 150.0 * 1000.0,
        res.page_results[0]['average_image_gathering_time'].value)
    self.assertEquals(
        1.4,
        res.page_results[0]['total_deferred_image_decoding_time'].value)

    # Tile Analysis Results
    self.assertEquals(
        160,
        res.page_results[0]['total_tiles_analyzed'].value)
    self.assertEquals(
        40,
        res.page_results[0]['solid_color_tiles_analyzed'].value)
    self.assertAlmostEquals(
        1.6 / 160.0 * 1000.0,
        res.page_results[0]['average_tile_analysis_time'].value)

    # Latency Results
    self.assertAlmostEquals(
        1.7 / 170.0 * 1000.0,
        res.page_results[0]['average_latency'].value)
    self.assertAlmostEquals(
        1.8 / 180.0 * 1000.0,
        res.page_results[0]['average_touch_ui_latency'].value)
    self.assertAlmostEquals(
        1.9 / 190.0 * 1000.0,
        res.page_results[0]['average_touch_acked_latency'].value)
    self.assertAlmostEquals(
        2.0 / 200.0 * 1000.0,
        res.page_results[0]['average_scroll_update_latency'].value)
mrshelly/openerp71313
refs/heads/master
openerp/addons/l10n_be_hr_payroll/__init__.py
438
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

import l10n_be_hr_payroll

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
krissrex/python_projects
refs/heads/master
Projects/nodeGraph.py
1
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 27 22:23:08 2014

@author: kristian
"""

import tkinter
from sys import argv


class Link(object):
    def __init__(self, startNode, endNode):
        self.setStart(startNode)
        self.setEnd(endNode)
        self.l = None

    def setStart(self, node):
        if isinstance(node, Node):
            self.start = node
            node.links.append(self)

    def setEnd(self, node):
        if isinstance(node, Node):
            self.end = node
            node.links.append(self)


class Node(object):
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
        self.links = []


class NodeGrapher(object):
    def __init__(self, V=None, E=None):
        # Use None defaults: mutable default arguments ([]) would be shared
        # between every NodeGrapher instance.
        self.V = self.getValidElementsList(V or [], Node)
        self.E = self.getValidElementsList(E or [], Link)
        self.selectedLinkStart = None
        self.selectedLinkEnd = None
        self.selectedNode = None
        self._initGUI()

    def show(self):
        tkinter.mainloop()

    def getValidElementsList(self, elementList, className):
        return [element for element in elementList
                if isinstance(element, className)]

    def _initGUI(self):
        self.main = tkinter.Tk()
        self.main.title("Node Grapher")

        self.main.bind("<Button-1>", self._clicked)
        self.main.bind("<B1-Motion>", self._dragging)
        self.main.bind("<ButtonRelease-1>", self._released)
        self.main.bind("<Delete>", self._delete_all)
        # For adding nodes
        self.main.bind("<Control-Button-1>", self._ctrlClicked)

        # Add a canvas to draw points and lines
        self.canvas = tkinter.Canvas(self.main, width=680, height=500)
        # For right-clicking a node
        self.canvas.tag_bind("node", "<Button-3>", self._nodeRightClicked)
        self.canvas.tag_bind("node", "<Control-Button-3>",
                             self._nodeCtrlRightClicked)
        self.canvas.pack()

        # Draw nodes
        self._render()

    def _ctrlClicked(self, event):
        x = self.canvas.canvasx(event.x)
        y = self.canvas.canvasy(event.y)
        node = Node(x, y)
        self.V.append(node)
        self._drawNode(node)

    def _nodeRightClicked(self, event):
        x, y = self._canvas_coords(event)
        node = self._getClosestNode(x, y)

        # Select the node and color its point, if one has not been
        # selected already
        if not self.selectedLinkStart:
            self.selectedLinkStart = node
            self.canvas.itemconfig(node.p, tags=("node", "selected"),
                                   fill="red")
        else:
            # If the same node was selected twice, reset it.
            # Otherwise, the node was the end node.
            if node == self.selectedLinkStart:
                self.canvas.itemconfig(self.selectedLinkStart.p,
                                       tags="node", fill="cyan")
                self.selectedLinkStart = None
            else:
                self.selectedLinkEnd = node
                self._connectNodes()

    def _nodeCtrlRightClicked(self, event):
        x, y = self._canvas_coords(event)
        node = self._getClosestNode(x, y)

        if not self.selectedLinkStart:
            self.selectedLinkStart = node
            self.canvas.itemconfig(node.p, tags=("node", "selected"),
                                   fill="red")
        else:
            # Use [:] to make a copy of the list, to avoid breaking iteration
            for link in self.selectedLinkStart.links[:]:
                if node == link.end or node == link.start:
                    try:
                        link.end.links.remove(link)
                    except ValueError:
                        pass
                    try:
                        link.start.links.remove(link)
                    except ValueError:
                        pass
                    try:
                        self.E.remove(link)
                    except ValueError:
                        pass
                    self.canvas.delete(link.l)

    def _delete_all(self, event):
        """Delete all lines and links.
        If there are none, delete all nodes and points."""
        if self.E:
            self.E = []
            # Also drop the nodes' references to the deleted links, so
            # dragging a node afterwards does not touch dead canvas ids.
            for node in self.V:
                node.links = []
            self.canvas.delete("link")
        elif self.V:
            self.V = []
            self.canvas.delete("node")

    def _canvas_coords(self, event):
        return (self.canvas.canvasx(event.x), self.canvas.canvasy(event.y))

    def _connectNodes(self):
        """Link the nodes assigned as selected in the NodeGrapher
        and draw them."""
        if isinstance(self.selectedLinkStart, Node) \
                and isinstance(self.selectedLinkEnd, Node):
            # Create a Link with the two selected nodes.  Link.__init__
            # already registers itself in both nodes' .links lists, so
            # appending here as well would duplicate the references and
            # leave stale links behind after deletion.
            link = Link(self.selectedLinkStart, self.selectedLinkEnd)
            self.E.append(link)

            self._drawLink(link)
            self._sendLinksBack()

            self.canvas.itemconfig(self.selectedLinkStart.p, tags="node",
                                   fill="cyan")
            # Clear the selection.
            self.selectedLinkStart = None
            self.selectedLinkEnd = None

    def _clicked(self, event):
        x, y = self._canvas_coords(event)
        self.selectedNode = self._getClosestNode(x, y)

    def _dragging(self, event):
        node = self.selectedNode
        if node:
            # Use canvas coordinates, like the other event handlers do.
            node.x, node.y = self._canvas_coords(event)

            # update the drawn point with id in node.p and radius 5
            self.canvas.coords(node.p,
                               int(node.x - 5), int(node.y - 5),
                               int(node.x + 5), int(node.y + 5))

            for link in node.links:
                s = link.start
                e = link.end
                # update the drawn line with id in link.l
                # and start in s.x, s.y and end in e.x, e.y
                self.canvas.coords(link.l, s.x, s.y, e.x, e.y)

    def _released(self, event):
        self.selectedNode = None

    def _render(self):
        self.canvas.create_rectangle(0, 0, 680, 500, fill="green")
        for point in self.V:
            self._drawNode(point)
        for line in self.E:
            self._drawLink(line)
        self._sendLinksBack()

    def _drawNode(self, node):
        points = [node.x - 5, node.y - 5, node.x + 5, node.y + 5]
        node.p = self.canvas.create_oval(points, fill="cyan", tags="node")

    def _drawLink(self, link):
        start = link.start
        end = link.end
        points = [start.x, start.y, end.x, end.y]
        # Create a line in the canvas and assign its id to link.l
        link.l = self.canvas.create_line(points, tags="link")

    def _sendLinksBack(self):
        """Sends all lines in the canvas inwards so they are behind
        the points."""
        # Lower all elements with tag 'link' below those with tag 'node'
        try:
            self.canvas.tag_lower("link", "node")
        except tkinter.TclError:
            # tag_lower fails if there are no items with these tags yet.
            print("Failed to send back lines. Perhaps there are none?")

    def _getClosestNode(self, x, y):
        if not self.V:
            # No nodes in the list of vertices.
            return None

        def distance_squared(node):
            # No need to take the square root; we only compare distances.
            return (node.x - x)**2 + (node.y - y)**2

        # Initial distance and value
        dist = distance_squared(self.V[0])
        closest_node = self.V[0]

        # Iterate over nodes and find the closest one
        for node in self.V:
            current_dist = distance_squared(node)
            if current_dist <= dist:
                closest_node = node
                dist = current_dist
        return closest_node


def main(args):
    nodes = []
    edges = []
    if "debug" in args:
        for x in range(0, 500, 50):
            nodes.append(Node(0.5 * x + x % 100, x + 25))
        c = 0
        while c < len(nodes) - 1:
            # Link.__init__ registers the link with both nodes.
            edges.append(Link(nodes[c], nodes[c + 1]))
            if c == 3:
                edges.append(Link(nodes[c], nodes[c + 2]))
            c += 1
    window = NodeGrapher(nodes, edges)
    window.show()


if __name__ == "__main__":
    main(argv)
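# Illustrative only: building a small graph in code rather than through the
# GUI or "debug" mode.  The triangle layout below is made up for the demo;
# Node, Link and NodeGrapher are the classes defined above.
#
#     a, b, c = Node(100, 100), Node(300, 100), Node(200, 250)
#     window = NodeGrapher([a, b, c], [Link(a, b), Link(b, c), Link(c, a)])
#     window.show()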
enitihas/SAC-Website
refs/heads/master
venv/bin/venv/lib/python2.7/site-packages/flask/ctx.py
776
# -*- coding: utf-8 -*- """ flask.ctx ~~~~~~~~~ Implements the objects required to keep the context. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ from __future__ import with_statement import sys from functools import update_wrapper from werkzeug.exceptions import HTTPException from .globals import _request_ctx_stack, _app_ctx_stack from .module import blueprint_is_module from .signals import appcontext_pushed, appcontext_popped class _AppCtxGlobals(object): """A plain object.""" def get(self, name, default=None): return self.__dict__.get(name, default) def __contains__(self, item): return item in self.__dict__ def __iter__(self): return iter(self.__dict__) def __repr__(self): top = _app_ctx_stack.top if top is not None: return '<flask.g of %r>' % top.app.name return object.__repr__(self) def after_this_request(f): """Executes a function after this request. This is useful to modify response objects. The function is passed the response object and has to return the same or a new one. Example:: @app.route('/') def index(): @after_this_request def add_header(response): response.headers['X-Foo'] = 'Parachute' return response return 'Hello World!' This is more useful if a function other than the view function wants to modify a response. For instance think of a decorator that wants to add some headers without converting the return value into a response object. .. versionadded:: 0.9 """ _request_ctx_stack.top._after_request_functions.append(f) return f def copy_current_request_context(f): """A helper function that decorates a function to retain the current request context. This is useful when working with greenlets. The moment the function is decorated a copy of the request context is created and then pushed when the function is called. Example:: import gevent from flask import copy_current_request_context @app.route('/') def index(): @copy_current_request_context def do_some_work(): # do some work here, it can access flask.request like you # would otherwise in the view function. ... gevent.spawn(do_some_work) return 'Regular response' .. versionadded:: 0.10 """ top = _request_ctx_stack.top if top is None: raise RuntimeError('This decorator can only be used at local scopes ' 'when a request context is on the stack. For instance within ' 'view functions.') reqctx = top.copy() def wrapper(*args, **kwargs): with reqctx: return f(*args, **kwargs) return update_wrapper(wrapper, f) def has_request_context(): """If you have code that wants to test if a request context is there or not this function can be used. For instance, you may want to take advantage of request information if the request object is available, but fail silently if it is unavailable. :: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and has_request_context(): remote_addr = request.remote_addr self.remote_addr = remote_addr Alternatively you can also just test any of the context bound objects (such as :class:`request` or :class:`g` for truthness):: class User(db.Model): def __init__(self, username, remote_addr=None): self.username = username if remote_addr is None and request: remote_addr = request.remote_addr self.remote_addr = remote_addr .. versionadded:: 0.7 """ return _request_ctx_stack.top is not None def has_app_context(): """Works like :func:`has_request_context` but for the application context. You can also just do a boolean check on the :data:`current_app` object instead. .. 
versionadded:: 0.9 """ return _app_ctx_stack.top is not None class AppContext(object): """The application context binds an application object implicitly to the current thread or greenlet, similar to how the :class:`RequestContext` binds request information. The application context is also implicitly created if a request context is created but the application is not on top of the individual application context. """ def __init__(self, app): self.app = app self.url_adapter = app.create_url_adapter(None) self.g = app.app_ctx_globals_class() # Like request context, app contexts can be pushed multiple times # but there a basic "refcount" is enough to track them. self._refcnt = 0 def push(self): """Binds the app context to the current context.""" self._refcnt += 1 _app_ctx_stack.push(self) appcontext_pushed.send(self.app) def pop(self, exc=None): """Pops the app context.""" self._refcnt -= 1 if self._refcnt <= 0: if exc is None: exc = sys.exc_info()[1] self.app.do_teardown_appcontext(exc) rv = _app_ctx_stack.pop() assert rv is self, 'Popped wrong app context. (%r instead of %r)' \ % (rv, self) appcontext_popped.send(self.app) def __enter__(self): self.push() return self def __exit__(self, exc_type, exc_value, tb): self.pop(exc_value) class RequestContext(object): """The request context contains all request relevant information. It is created at the beginning of the request and pushed to the `_request_ctx_stack` and removed at the end of it. It will create the URL adapter and request object for the WSGI environment provided. Do not attempt to use this class directly, instead use :meth:`~flask.Flask.test_request_context` and :meth:`~flask.Flask.request_context` to create this object. When the request context is popped, it will evaluate all the functions registered on the application for teardown execution (:meth:`~flask.Flask.teardown_request`). The request context is automatically popped at the end of the request for you. In debug mode the request context is kept around if exceptions happen so that interactive debuggers have a chance to introspect the data. With 0.4 this can also be forced for requests that did not fail and outside of `DEBUG` mode. By setting ``'flask._preserve_context'`` to `True` on the WSGI environment the context will not pop itself at the end of the request. This is used by the :meth:`~flask.Flask.test_client` for example to implement the deferred cleanup functionality. You might find this helpful for unittests where you need the information from the context local around for a little longer. Make sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in that situation, otherwise your unittests will leak memory. """ def __init__(self, app, environ, request=None): self.app = app if request is None: request = app.request_class(environ) self.request = request self.url_adapter = app.create_url_adapter(self.request) self.flashes = None self.session = None # Request contexts can be pushed multiple times and interleaved with # other request contexts. Now only if the last level is popped we # get rid of them. Additionally if an application context is missing # one is created implicitly so for each level we add this information self._implicit_app_ctx_stack = [] # indicator if the context was preserved. Next time another context # is pushed the preserved context is popped. self.preserved = False # remembers the exception for pop if there is one in case the context # preservation kicks in. 
self._preserved_exc = None # Functions that should be executed after the request on the response # object. These will be called before the regular "after_request" # functions. self._after_request_functions = [] self.match_request() # XXX: Support for deprecated functionality. This is going away with # Flask 1.0 blueprint = self.request.blueprint if blueprint is not None: # better safe than sorry, we don't want to break code that # already worked bp = app.blueprints.get(blueprint) if bp is not None and blueprint_is_module(bp): self.request._is_old_module = True def _get_g(self): return _app_ctx_stack.top.g def _set_g(self, value): _app_ctx_stack.top.g = value g = property(_get_g, _set_g) del _get_g, _set_g def copy(self): """Creates a copy of this request context with the same request object. This can be used to move a request context to a different greenlet. Because the actual request object is the same this cannot be used to move a request context to a different thread unless access to the request object is locked. .. versionadded:: 0.10 """ return self.__class__(self.app, environ=self.request.environ, request=self.request ) def match_request(self): """Can be overridden by a subclass to hook into the matching of the request. """ try: url_rule, self.request.view_args = \ self.url_adapter.match(return_rule=True) self.request.url_rule = url_rule except HTTPException as e: self.request.routing_exception = e def push(self): """Binds the request context to the current context.""" # If an exception occurs in debug mode or if context preservation is # activated under exception situations exactly one context stays # on the stack. The rationale is that you want to access that # information under debug situations. However if someone forgets to # pop that context again we want to make sure that on the next push # it's invalidated, otherwise we run at risk that something leaks # memory. This is usually only a problem in testsuite since this # functionality is not active in production environments. top = _request_ctx_stack.top if top is not None and top.preserved: top.pop(top._preserved_exc) # Before we push the request context we have to ensure that there # is an application context. app_ctx = _app_ctx_stack.top if app_ctx is None or app_ctx.app != self.app: app_ctx = self.app.app_context() app_ctx.push() self._implicit_app_ctx_stack.append(app_ctx) else: self._implicit_app_ctx_stack.append(None) _request_ctx_stack.push(self) # Open the session at the moment that the request context is # available. This allows a custom open_session method to use the # request context (e.g. code that access database information # stored on `g` instead of the appcontext). self.session = self.app.open_session(self.request) if self.session is None: self.session = self.app.make_null_session() def pop(self, exc=None): """Pops the request context and unbinds it by doing that. This will also trigger the execution of functions registered by the :meth:`~flask.Flask.teardown_request` decorator. .. versionchanged:: 0.9 Added the `exc` argument. """ app_ctx = self._implicit_app_ctx_stack.pop() clear_request = False if not self._implicit_app_ctx_stack: self.preserved = False self._preserved_exc = None if exc is None: exc = sys.exc_info()[1] self.app.do_teardown_request(exc) # If this interpreter supports clearing the exception information # we do that now. This will only go into effect on Python 2.x, # on 3.x it disappears automatically at the end of the exception # stack. 
if hasattr(sys, 'exc_clear'): sys.exc_clear() request_close = getattr(self.request, 'close', None) if request_close is not None: request_close() clear_request = True rv = _request_ctx_stack.pop() assert rv is self, 'Popped wrong request context. (%r instead of %r)' \ % (rv, self) # get rid of circular dependencies at the end of the request # so that we don't require the GC to be active. if clear_request: rv.request.environ['werkzeug.request'] = None # Get rid of the app as well if necessary. if app_ctx is not None: app_ctx.pop(exc) def auto_pop(self, exc): if self.request.environ.get('flask._preserve_context') or \ (exc is not None and self.app.preserve_context_on_exception): self.preserved = True self._preserved_exc = exc else: self.pop(exc) def __enter__(self): self.push() return self def __exit__(self, exc_type, exc_value, tb): # do not pop the request stack if we are in debug mode and an # exception happened. This will allow the debugger to still # access the request object in the interactive shell. Furthermore # the context can be force kept alive for the test client. # See flask.testing for how this works. self.auto_pop(exc_value) def __repr__(self): return '<%s \'%s\' [%s] of %s>' % ( self.__class__.__name__, self.request.url, self.request.method, self.app.name, )
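# Illustrative sketch (not part of flask.ctx): how the AppContext and
# RequestContext defined above are usually pushed from user code.  Assumes
# an application object named `app`; everything else is public Flask API.
#
#     from flask import Flask, g, has_app_context, has_request_context
#
#     app = Flask(__name__)
#
#     with app.app_context():               # pushes an AppContext
#         g.answer = 42                     # stored on _AppCtxGlobals
#         assert has_app_context()
#
#     with app.test_request_context('/'):   # pushes a RequestContext and,
#         assert has_request_context()      # implicitly, an AppContext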
booto/dolphin
refs/heads/master
Tools/symbolicate-ppc.py
132
#!/usr/bin/python

# This filter replaces all occurrences of JIT_PPC_${address} by a
# corresponding function name JIT_PPC_${symbol} as defined by a .map file.
# TODO, add an option to append the block address (JIT_PPC_${symbol}@${addr})

# Example 1: guest function profiling (excluding host callees)
#
# $ perf record -t $tid
# $ perf script | sed 's/.*cycles: *[0-9a-f]* *//' |
#   python Tools/symbolicate-ppc.py ~/.dolphin-emu/Maps/${map}.map |
#   rankor -r | head
# 10.05% JIT_Loop (/tmp/perf-15936.map)
# 3.73% [unknown] (/tmp/perf-15936.map)
# 1.91% VideoBackendHardware::Video_GatherPipeBursted (/opt/dolphin-2015-05-06/bin/dolphin-emu)
# 1.39% JIT_PPC_PSMTXConcat (/tmp/perf-15936.map)
# 1.00% JIT_PPC_zz_051754c_ (/tmp/perf-15936.map)
# 0.90% JIT_PPC_zz_051751c_ (/tmp/perf-15936.map)
# 0.71% JIT_PPC_zz_04339d4_ (/tmp/perf-15936.map)
# 0.59% JIT_PPC_zz_05173e0_ (/tmp/perf-15936.map)
# 0.57% JIT_PPC_zz_044141c_ (/tmp/perf-15936.map)
# 0.54% JIT_PPC_zz_01839cc_ (/tmp/perf-15936.map)

# Example 2: guest function profiling (including host callees)
#
# $ perf record --call-graph dwarf -t $tid
# $ perf script | stackcollapse-perf.pl | sed 's/^CPU;//' |
#   python Tools/symbolicate-ppc.py ~/.dolphin-emu/Maps/${map}.map |
#   perl -pe 's/^([^; ]*).*? ([0-9]+?)$/\1 \2/' | stackcollapse-recursive.pl |
#   awk '{printf "%s %s\n", $2, $1}' | sort -rn | head
# 5811 JIT_Loop
# 2396 [unknown]
# 577 JIT_PPC_PSMTXConcat
# 464 JIT_PPC___restore_gpr
# 396 JIT_PPC_zz_0517514_
# 313 JIT_PPC_zz_04339d4_
# 290 JIT_PPC_zz_05173e0_
# 285 JIT_PPC_zz_01839cc_
# 277 JIT_PPC_zz_04335ac_
# 269 JIT_PPC_zz_0420b58_

import re
import sys

stdin = sys.stdin
stdout = sys.stdout


class Symbol:
    def __init__(self, start, size, name):
        self.start = start
        self.end = start + size
        self.name = name


# Read a .map file: this is a line-oriented file containing mappings from
# the (PowerPC) memory addresses to function names.
# The format is: "%08x %08x %08x %i %s" (address, size, address, 0, name).
# The entries should already be sorted.
def read_map(filename):
    reg = re.compile("^([0-9a-f]{8}) ([0-9a-f]{8}) ([0-9a-f]{8}) ([0-9]*) (.*)$")
    res = []
    with open(filename, "r") as f:
        for line in f:
            match = reg.match(line)
            if match:
                start = int(match.group(1), 16)
                size = int(match.group(2), 16)
                name = match.group(5)
                res.append(Symbol(start, size, name))
    return res


symbols = read_map(sys.argv[1])


# Do a binary search in the map in order to find the symbol:
def lookup(address):
    i = 0
    # Inclusive upper bound, hence len - 1 (len(symbols) would allow an
    # out-of-range index for addresses above the last symbol).
    j = len(symbols) - 1
    while True:
        if j < i:
            return "JIT_PPC_[unknown]"
        k = (i + j) // 2
        if address < symbols[k].start:
            j = k - 1
        elif address >= symbols[k].end:
            i = k + 1
        else:
            return "JIT_PPC_" + symbols[k].name


# Function used to replace a given match:
def replace(match):
    return lookup(int(match.group(1), 16))


# Process stdin and write to stdout:
for line in stdin:
    modline = re.sub('JIT_PPC_([0-9a-f]*)', replace, line)
    stdout.write(modline)
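# Illustrative only: what lookup() computes for a hypothetical one-line map
# file "80004000 00000100 80004000 0 main".  Addresses inside
# [start, start + size) resolve to the prefixed symbol name; anything
# outside every range falls back to the unknown marker.
#
#     lookup(0x80004080)  # -> "JIT_PPC_main"
#     lookup(0x90000000)  # -> "JIT_PPC_[unknown]"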
CamLib/AltmetricClient
refs/heads/master
tests_issues/test_issue9.py
1
import configparser import os from altmetric_client.altmetric_api_config import AltmetricAPIConfig from altmetric_client.output_writer_csv.csv_writer_master import CSVWriterMaster from altmetric_client.url_builder import URLBuilder from altmetric_client.altmetric_request import AltmetricRequest from altmetric_client.altmetric_loader import AltmetricLoader class TestIssue9: def setup_method(self): config_data = configparser.ConfigParser() self.files_out_directory = "../files_out/" self.test_output_file_name = "test_issue9results.csv" self.filepath = '{0}{1}'.format(self.files_out_directory, self.test_output_file_name) if os.path.isfile(self.filepath): os.remove(self.filepath) with open('../config.ini') as config_file: config_data.read_file(config_file) api_config = AltmetricAPIConfig() api_config.api_base_uri = config_data['api.altmetric.com']['APIBaseURI'] api_config.api_version = config_data['api.altmetric.com']['APIVersion'] api_config.api_base_command = config_data['api.altmetric.com']['APIBaseCommand'] api_config.api_requested_item_id_type = config_data['api.altmetric.com']['APIRequestedItemIdType'] api_config.api_key = config_data['api.altmetric.com']['APIKey'] url_builder = URLBuilder(api_config) self.altmetric_request = AltmetricRequest(url_builder) self.altmetric_loader = AltmetricLoader() self.csv_writer = CSVWriterMaster(self.test_output_file_name, self.files_out_directory) def _run_test_with_doi(self, doi): self.altmetric_request.doi_to_request = doi altmetric_data = self.altmetric_request.request() altmetric = self.altmetric_loader.parse_result(altmetric_data) altmetric.doi = doi self.csv_writer.altmetric = altmetric self.csv_writer.write_master() def test_doi_ends_with_61611_0(self): self._run_test_with_doi('10.1016/s0140-6736(12)61611-0') assert os.path.isfile(self.filepath) def test_doi_ends_with_10137_x(self): self._run_test_with_doi('10.1016/s0140-6736(98)10137-x') assert os.path.isfile(self.filepath) def test_doi_ends_with_15264(self): self._run_test_with_doi('10.1017/s0022143000015264') assert os.path.isfile(self.filepath) def test_doi_ends_with_p0048(self): self._run_test_with_doi('10.20965/jdr.2012.p0048') assert os.path.isfile(self.filepath) def test_doi_ends_with_166983(self): self._run_test_with_doi('10.1145/2166966.2166983') assert os.path.isfile(self.filepath) def test_doi_ends_with_45357(self): self._run_test_with_doi('10.1145/2145204.2145347') assert os.path.isfile(self.filepath)
defionscode/ansible
refs/heads/devel
lib/ansible/modules/system/mount.py
13
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2012, Red Hat, inc # Written by Seth Vidal # based on the mount modules from salt and puppet # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'core'} DOCUMENTATION = ''' --- module: mount short_description: Control active and configured mount points description: - This module controls active and configured mount points in C(/etc/fstab). author: - Ansible Core Team - Seth Vidal version_added: "0.6" options: path: description: - Path to the mount point (e.g. C(/mnt/files)). - Before 2.3 this option was only usable as I(dest), I(destfile) and I(name). required: true aliases: [ name ] src: description: - Device to be mounted on I(path). Required when I(state) set to C(present) or C(mounted). fstype: description: - Filesystem type. Required when I(state) is C(present) or C(mounted). opts: description: - Mount options (see fstab(5), or vfstab(4) on Solaris). dump: description: - Dump (see fstab(5)). Note that if set to C(null) and I(state) set to C(present), it will cease to work and duplicate entries will be made with subsequent runs. - Has no effect on Solaris systems. default: 0 passno: description: - Passno (see fstab(5)). Note that if set to C(null) and I(state) set to C(present), it will cease to work and duplicate entries will be made with subsequent runs. - Deprecated on Solaris systems. default: 0 state: description: - If C(mounted), the device will be actively mounted and appropriately configured in I(fstab). If the mount point is not present, the mount point will be created. - If C(unmounted), the device will be unmounted without changing I(fstab). - C(present) only specifies that the device is to be configured in I(fstab) and does not trigger or require a mount. - C(absent) specifies that the device mount's entry will be removed from I(fstab) and will also unmount the device and remove the mount point. required: true choices: [ absent, mounted, present, unmounted ] fstab: description: - File to use instead of C(/etc/fstab). You shouldn't use this option unless you really know what you are doing. This might be useful if you need to configure mountpoints in a chroot environment. OpenBSD does not allow specifying alternate fstab files with mount so do not use this on OpenBSD with any state that operates on the live filesystem. default: /etc/fstab (/etc/vfstab on Solaris) boot: description: - Determines if the filesystem should be mounted on boot. - Only applies to Solaris systems. type: bool default: 'yes' version_added: '2.2' backup: description: - Create a backup file including the timestamp information so you can get the original file back if you somehow clobbered it incorrectly. required: false type: bool default: "no" version_added: '2.5' notes: - As of Ansible 2.3, the I(name) option has been changed to I(path) as default, but I(name) still works as well. 
''' EXAMPLES = ''' # Before 2.3, option 'name' was used instead of 'path' - name: Mount DVD read-only mount: path: /mnt/dvd src: /dev/sr0 fstype: iso9660 opts: ro,noauto state: present - name: Mount up device by label mount: path: /srv/disk src: LABEL=SOME_LABEL fstype: ext4 state: present - name: Mount up device by UUID mount: path: /home src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077 fstype: xfs opts: noatime state: present - name: Unmount a mounted volume mount: path: /tmp/mnt-pnt state: unmounted - name: Mount and bind a volume mount: path: /system/new_volume/boot src: /boot opts: bind state: mounted fstype: none ''' import os from ansible.module_utils.basic import AnsibleModule, get_platform from ansible.module_utils.ismount import ismount from ansible.module_utils.six import iteritems from ansible.module_utils._text import to_native def write_fstab(module, lines, path): if module.params['backup']: module.backup_local(path) fs_w = open(path, 'w') for l in lines: fs_w.write(l) fs_w.flush() fs_w.close() def _escape_fstab(v): """Escape invalid characters in fstab fields. space (040) ampersand (046) backslash (134) """ if isinstance(v, int): return v else: return( v. replace('\\', '\\134'). replace(' ', '\\040'). replace('&', '\\046')) def set_mount(module, args): """Set/change a mount point location in fstab.""" to_write = [] exists = False changed = False escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)]) new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' if get_platform() == 'SunOS': new_line = ( '%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n') for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) continue if line.strip().startswith('#'): to_write.append(line) continue # Check if we got a valid line for splitting if ( get_platform() == 'SunOS' and len(line.split()) != 7 or get_platform() != 'SunOS' and len(line.split()) != 6): to_write.append(line) continue ld = {} if get_platform() == 'SunOS': ( ld['src'], dash, ld['name'], ld['fstype'], ld['passno'], ld['boot'], ld['opts'] ) = line.split() else: ( ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] ) = line.split() # Check if we found the correct line if ( ld['name'] != escaped_args['name'] or ( # In the case of swap, check the src instead 'src' in args and ld['name'] == 'none' and ld['fstype'] == 'swap' and ld['src'] != args['src'])): to_write.append(line) continue # If we got here we found a match - let's check if there is any # difference exists = True args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno') if get_platform() == 'SunOS': args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts') for t in args_to_check: if ld[t] != escaped_args[t]: ld[t] = escaped_args[t] changed = True if changed: to_write.append(new_line % ld) else: to_write.append(line) if not exists: to_write.append(new_line % escaped_args) changed = True if changed and not module.check_mode: write_fstab(module, to_write, args['fstab']) return (args['name'], changed) def unset_mount(module, args): """Remove a mount point from fstab.""" to_write = [] changed = False escaped_name = _escape_fstab(args['name']) for line in open(args['fstab'], 'r').readlines(): if not line.strip(): to_write.append(line) continue if line.strip().startswith('#'): to_write.append(line) continue # Check if we got a valid line for splitting if ( get_platform() == 'SunOS' and len(line.split()) != 7 or get_platform() != 'SunOS' and len(line.split()) != 6): 
to_write.append(line) continue ld = {} if get_platform() == 'SunOS': ( ld['src'], dash, ld['name'], ld['fstype'], ld['passno'], ld['boot'], ld['opts'] ) = line.split() else: ( ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] ) = line.split() if ( ld['name'] != escaped_name or ( # In the case of swap, check the src instead 'src' in args and ld['name'] == 'none' and ld['fstype'] == 'swap' and ld['src'] != args['src'])): to_write.append(line) continue # If we got here we found a match - continue and mark changed changed = True if changed and not module.check_mode: write_fstab(module, to_write, args['fstab']) return (args['name'], changed) def _set_fstab_args(fstab_file): result = [] if ( fstab_file and fstab_file != '/etc/fstab' and get_platform().lower() != 'sunos'): if get_platform().lower().endswith('bsd'): result.append('-F') else: result.append('-T') result.append(fstab_file) return result def mount(module, args): """Mount up a path or remount if needed.""" mount_bin = module.get_bin_path('mount', required=True) name = args['name'] cmd = [mount_bin] if get_platform().lower() == 'openbsd': # Use module.params['fstab'] here as args['fstab'] has been set to the # default value. if module.params['fstab'] is not None: module.fail_json( msg=( 'OpenBSD does not support alternate fstab files. Do not ' 'specify the fstab parameter for OpenBSD hosts')) else: cmd += _set_fstab_args(args['fstab']) cmd += [name] rc, out, err = module.run_command(cmd) if rc == 0: return 0, '' else: return rc, out + err def umount(module, path): """Unmount a path.""" umount_bin = module.get_bin_path('umount', required=True) cmd = [umount_bin, path] rc, out, err = module.run_command(cmd) if rc == 0: return 0, '' else: return rc, out + err def remount(module, args): """Try to use 'remount' first and fallback to (u)mount if unsupported.""" mount_bin = module.get_bin_path('mount', required=True) cmd = [mount_bin] # Multiplatform remount opts if get_platform().lower().endswith('bsd'): cmd += ['-u'] else: cmd += ['-o', 'remount'] if get_platform().lower() == 'openbsd': # Use module.params['fstab'] here as args['fstab'] has been set to the # default value. if module.params['fstab'] is not None: module.fail_json( msg=( 'OpenBSD does not support alternate fstab files. Do not ' 'specify the fstab parameter for OpenBSD hosts')) else: cmd += _set_fstab_args(args['fstab']) cmd += [args['name']] out = err = '' try: if get_platform().lower().endswith('bsd'): # Note: Forcing BSDs to do umount/mount due to BSD remount not # working as expected (suspect bug in the BSD mount command) # Interested contributor could rework this to use mount options on # the CLI instead of relying on fstab # https://github.com/ansible/ansible-modules-core/issues/5591 rc = 1 else: rc, out, err = module.run_command(cmd) except: rc = 1 msg = '' if rc != 0: msg = out + err rc, msg = umount(module, args['name']) if rc == 0: rc, msg = mount(module, args) return rc, msg # Note if we wanted to put this into module_utils we'd have to get permission # from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923 # @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439 # and @abadger to relicense from GPLv3+ def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None): """Return whether the dest is bind mounted :arg module: The AnsibleModule (used for helper functions) :arg dest: The directory to be mounted under. This is the primary means of identifying whether the destination is mounted. 
:kwarg src: The source directory. If specified, this is used to help ensure that we are detecting that the correct source is mounted there. :kwarg fstype: The filesystem type. If specified this is also used to help ensure that we are detecting the right mount. :kwarg linux_mounts: Cached list of mounts for Linux. :returns: True if the dest is mounted with src otherwise False. """ is_mounted = False if get_platform() == 'Linux' and linux_mounts is not None: if src is None: # That's for unmounted/absent if dest in linux_mounts: is_mounted = True else: if dest in linux_mounts: is_mounted = linux_mounts[dest]['src'] == src else: bin_path = module.get_bin_path('mount', required=True) cmd = '%s -l' % bin_path rc, out, err = module.run_command(cmd) mounts = [] if len(out): mounts = to_native(out).strip().split('\n') for mnt in mounts: arguments = mnt.split() if ( (arguments[0] == src or src is None) and arguments[2] == dest and (arguments[4] == fstype or fstype is None)): is_mounted = True if is_mounted: break return is_mounted def get_linux_mounts(module, mntinfo_file="/proc/self/mountinfo"): """Gather mount information""" try: f = open(mntinfo_file) except IOError: return lines = map(str.strip, f.readlines()) try: f.close() except IOError: module.fail_json(msg="Cannot close file %s" % mntinfo_file) mntinfo = {} for line in lines: fields = line.split() record = { 'id': int(fields[0]), 'parent_id': int(fields[1]), 'root': fields[3], 'dst': fields[4], 'opts': fields[5], 'fs': fields[-3], 'src': fields[-2] } mntinfo[record['id']] = record mounts = {} for mnt in mntinfo.values(): if mnt['parent_id'] != 1 and mnt['parent_id'] in mntinfo: m = mntinfo[mnt['parent_id']] if ( len(m['root']) > 1 and mnt['root'].startswith("%s/" % m['root'])): # Ommit the parent's root in the child's root # == Example: # 140 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw # 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw # == Expected result: # src=/tmp/aaa mnt['root'] = mnt['root'][len(m['root']):] # Prepend the parent's dst to the child's root # == Example: # 42 60 0:35 / /tmp rw - tmpfs tmpfs rw # 78 42 0:35 /aaa /tmp/bbb rw - tmpfs tmpfs rw # == Expected result: # src=/tmp/aaa if m['dst'] != '/': mnt['root'] = "%s%s" % (m['dst'], mnt['root']) src = mnt['root'] else: src = mnt['src'] record = { 'dst': mnt['dst'], 'src': src, 'opts': mnt['opts'], 'fs': mnt['fs'] } mounts[mnt['dst']] = record return mounts def main(): module = AnsibleModule( argument_spec=dict( boot=dict(type='bool', default=True), dump=dict(type='str'), fstab=dict(type='str'), fstype=dict(type='str'), path=dict(type='path', required=True, aliases=['name']), opts=dict(type='str'), passno=dict(type='str'), src=dict(type='path'), backup=dict(default=False, type='bool'), state=dict(type='str', required=True, choices=['absent', 'mounted', 'present', 'unmounted']), ), supports_check_mode=True, required_if=( ['state', 'mounted', ['src', 'fstype']], ['state', 'present', ['src', 'fstype']], ), ) # solaris args: # name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab # linux args: # name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab # Note: Do not modify module.params['fstab'] as we need to know if the user # explicitly specified it in mount() and remount() if get_platform().lower() == 'sunos': args = dict( name=module.params['path'], opts='-', passno='-', fstab=module.params['fstab'], boot='yes' ) if args['fstab'] is None: args['fstab'] = '/etc/vfstab' else: args = dict( name=module.params['path'], opts='defaults', dump='0', 
passno='0', fstab=module.params['fstab'] ) if args['fstab'] is None: args['fstab'] = '/etc/fstab' # FreeBSD doesn't have any 'default' so set 'rw' instead if get_platform() == 'FreeBSD': args['opts'] = 'rw' linux_mounts = [] # Cache all mounts here in order we have consistent results if we need to # call is_bind_mounted() multiple times if get_platform() == 'Linux': linux_mounts = get_linux_mounts(module) if linux_mounts is None: args['warnings'] = ( 'Cannot open file /proc/self/mountinfo. ' 'Bind mounts might be misinterpreted.') # Override defaults with user specified params for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'): if module.params[key] is not None: args[key] = module.params[key] # If fstab file does not exist, we first need to create it. This mainly # happens when fstab option is passed to the module. if not os.path.exists(args['fstab']): if not os.path.exists(os.path.dirname(args['fstab'])): os.makedirs(os.path.dirname(args['fstab'])) open(args['fstab'], 'a').close() # absent: # Remove from fstab and unmounted. # unmounted: # Do not change fstab state, but unmount. # present: # Add to fstab, do not change mount state. # mounted: # Add to fstab if not there and make sure it is mounted. If it has # changed in fstab then remount it. state = module.params['state'] name = module.params['path'] changed = False if state == 'absent': name, changed = unset_mount(module, args) if changed and not module.check_mode: if ismount(name) or is_bind_mounted(module, linux_mounts, name): res, msg = umount(module, name) if res: module.fail_json( msg="Error unmounting %s: %s" % (name, msg)) if os.path.exists(name): try: os.rmdir(name) except (OSError, IOError) as e: module.fail_json(msg="Error rmdir %s: %s" % (name, to_native(e))) elif state == 'unmounted': if ismount(name) or is_bind_mounted(module, linux_mounts, name): if not module.check_mode: res, msg = umount(module, name) if res: module.fail_json( msg="Error unmounting %s: %s" % (name, msg)) changed = True elif state == 'mounted': if not os.path.exists(name) and not module.check_mode: try: os.makedirs(name) except (OSError, IOError) as e: module.fail_json( msg="Error making dir %s: %s" % (name, to_native(e))) name, changed = set_mount(module, args) res = 0 if ( ismount(name) or is_bind_mounted( module, linux_mounts, name, args['src'], args['fstype'])): if changed and not module.check_mode: res, msg = remount(module, args) changed = True else: changed = True if not module.check_mode: res, msg = mount(module, args) if res: module.fail_json(msg="Error mounting %s: %s" % (name, msg)) elif state == 'present': name, changed = set_mount(module, args) else: module.fail_json(msg='Unexpected position reached') module.exit_json(changed=changed, **args) if __name__ == '__main__': main()
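# Illustrative only: the octal escaping performed by _escape_fstab() above,
# matching how fstab(5) encodes space (\040), ampersand (\046) and
# backslash (\134) inside a field.
#
#     _escape_fstab('/mnt/my drive')  # -> /mnt/my\040drive
#     _escape_fstab('a&b\\c')         # -> a\046b\134c
#     _escape_fstab(0)                # -> 0 (integers pass through)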
dezGusty/googletest
refs/heads/master
test/gtest_env_var_test.py
2408
#!/usr/bin/env python # # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Verifies that Google Test correctly parses environment variables.""" __author__ = 'wan@google.com (Zhanyong Wan)' import os import gtest_test_utils IS_WINDOWS = os.name == 'nt' IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_') environ = os.environ.copy() def AssertEq(expected, actual): if expected != actual: print 'Expected: %s' % (expected,) print ' Actual: %s' % (actual,) raise AssertionError def SetEnvVar(env_var, value): """Sets the env variable to 'value'; unsets it when 'value' is None.""" if value is not None: environ[env_var] = value elif env_var in environ: del environ[env_var] def GetFlag(flag): """Runs gtest_env_var_test_ and returns its output.""" args = [COMMAND] if flag is not None: args += [flag] return gtest_test_utils.Subprocess(args, env=environ).output def TestFlag(flag, test_val, default_val): """Verifies that the given flag is affected by the corresponding env var.""" env_var = 'GTEST_' + flag.upper() SetEnvVar(env_var, test_val) AssertEq(test_val, GetFlag(flag)) SetEnvVar(env_var, None) AssertEq(default_val, GetFlag(flag)) class GTestEnvVarTest(gtest_test_utils.TestCase): def testEnvVarAffectsFlag(self): """Tests that environment variable should affect the corresponding flag.""" TestFlag('break_on_failure', '1', '0') TestFlag('color', 'yes', 'auto') TestFlag('filter', 'FooTest.Bar', '*') TestFlag('output', 'xml:tmp/foo.xml', '') TestFlag('print_time', '0', '1') TestFlag('repeat', '999', '1') TestFlag('throw_on_failure', '1', '0') TestFlag('death_test_style', 'threadsafe', 'fast') TestFlag('catch_exceptions', '0', '1') if IS_LINUX: TestFlag('death_test_use_fork', '1', '0') TestFlag('stack_trace_depth', '0', '100') if __name__ == '__main__': gtest_test_utils.Main()
TRESCLOUD/odoopub
refs/heads/master
addons/association/__openerp__.py
119
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Associations Management', 'version': '0.1', 'category': 'Specific Industry Applications', 'description': """ This module is to configure modules related to an association. ============================================================== It installs the profile for associations to manage events, registrations, memberships, membership products (schemes). """, 'author': 'OpenERP SA', 'depends': ['base_setup', 'membership', 'event'], 'data': ['security/ir.model.access.csv', 'profile_association.xml'], 'demo': [], 'installable': True, 'auto_install': False, 'images': ['images/association1.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
vmthunder/virtman
refs/heads/master
virtman/utils/singleton.py
1
#!/usr/bin/env python

import threading


def singleton(cls):
    """Thread-safe singleton decorator (double-checked locking).

    The GIL alone does not make the check-then-create sequence atomic, so
    a lock guards the first instantiation; the fast path afterwards never
    takes the lock.  Note that cls() itself must not request the singleton,
    since the lock is not reentrant.
    """
    lock = threading.Lock()
    instances = {}

    def _singleton(*args, **kwargs):
        if cls not in instances:
            with lock:
                # Re-check under the lock: another thread may have created
                # the instance while this one was waiting.
                if cls not in instances:
                    instances[cls] = cls(*args, **kwargs)
        return instances[cls]

    return _singleton
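# A minimal usage sketch (illustrative only; AppConfig is a made-up example
# class, not part of virtman).  Every call to the decorated class returns
# the one shared instance created on first use.
if __name__ == '__main__':
    @singleton
    class AppConfig(object):
        def __init__(self):
            self.loaded = True

    # Both calls return the exact same instance, even across threads.
    assert AppConfig() is AppConfig()
    print(AppConfig().loaded)  # -> True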
titom1986/CouchPotatoServer
refs/heads/develop
couchpotato/core/media/movie/providers/trailer/youtube_dl/__init__.py
6
#!/usr/bin/env python # -*- coding: utf-8 -*- __authors__ = ( 'Ricardo Garcia Gonzalez', 'Danny Colligan', 'Benjamin Johnson', 'Vasyl\' Vavrychuk', 'Witold Baryluk', 'Paweł Paprota', 'Gergely Imreh', 'Rogério Brito', 'Philipp Hagemeister', 'Sören Schulze', 'Kevin Ngo', 'Ori Avtalion', 'shizeeg', 'Filippo Valsorda', 'Christian Albrecht', 'Dave Vasilevsky', 'Jaime Marquínez Ferrándiz', 'Jeff Crouse', 'Osama Khalid', 'Michael Walter', 'M. Yasoob Ullah Khalid', 'Julien Fraichard', 'Johny Mo Swag', 'Axel Noack', 'Albert Kim', 'Pierre Rudloff', 'Huarong Huo', 'Ismael Mejía', 'Steffan \'Ruirize\' James', 'Andras Elso', 'Jelle van der Waa', 'Marcin Cieślak', 'Anton Larionov', 'Takuya Tsuchida', 'Sergey M.', 'Michael Orlitzky', 'Chris Gahan', 'Saimadhav Heblikar', 'Mike Col', 'Oleg Prutz', 'pulpe', 'Andreas Schmitz', 'Michael Kaiser', 'Niklas Laxström', 'David Triendl', 'Anthony Weems', 'David Wagner', 'Juan C. Olivares', 'Mattias Harrysson', 'phaer', 'Sainyam Kapoor', 'Nicolas Évrard', 'Jason Normore', 'Hoje Lee', 'Adam Thalhammer', 'Georg Jähnig', 'Ralf Haring', 'Koki Takahashi', 'Ariset Llerena', 'Adam Malcontenti-Wilson', 'Tobias Bell', 'Naglis Jonaitis', 'Charles Chen', 'Hassaan Ali', ) __license__ = 'Public Domain' import codecs import io import optparse import os import random import shlex import shutil import sys from .utils import ( compat_getpass, compat_print, DateRange, DEFAULT_OUTTMPL, decodeOption, get_term_width, DownloadError, get_cachedir, MaxDownloadsReached, preferredencoding, read_batch_urls, SameFileError, setproctitle, std_headers, write_string, ) from .update import update_self from .downloader import ( FileDownloader, ) from .extractor import gen_extractors from .version import __version__ from .YoutubeDL import YoutubeDL from .postprocessor import ( AtomicParsleyPP, FFmpegAudioFixPP, FFmpegMetadataPP, FFmpegVideoConvertor, FFmpegExtractAudioPP, FFmpegEmbedSubtitlePP, XAttrMetadataPP, ) def parseOpts(overrideArguments=None): def _readOptions(filename_bytes, default=[]): try: optionf = open(filename_bytes) except IOError: return default # silently skip if file is not present try: res = [] for l in optionf: res += shlex.split(l, comments=True) finally: optionf.close() return res def _readUserConf(): xdg_config_home = os.environ.get('XDG_CONFIG_HOME') if xdg_config_home: userConfFile = os.path.join(xdg_config_home, 'youtube-dl', 'config') if not os.path.isfile(userConfFile): userConfFile = os.path.join(xdg_config_home, 'youtube-dl.conf') else: userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl', 'config') if not os.path.isfile(userConfFile): userConfFile = os.path.join(os.path.expanduser('~'), '.config', 'youtube-dl.conf') userConf = _readOptions(userConfFile, None) if userConf is None: appdata_dir = os.environ.get('appdata') if appdata_dir: userConf = _readOptions( os.path.join(appdata_dir, 'youtube-dl', 'config'), default=None) if userConf is None: userConf = _readOptions( os.path.join(appdata_dir, 'youtube-dl', 'config.txt'), default=None) if userConf is None: userConf = _readOptions( os.path.join(os.path.expanduser('~'), 'youtube-dl.conf'), default=None) if userConf is None: userConf = _readOptions( os.path.join(os.path.expanduser('~'), 'youtube-dl.conf.txt'), default=None) if userConf is None: userConf = [] return userConf def _format_option_string(option): ''' ('-o', '--option') -> -o, --format METAVAR''' opts = [] if option._short_opts: opts.append(option._short_opts[0]) if option._long_opts: opts.append(option._long_opts[0]) if len(opts) > 1: 
opts.insert(1, ', ') if option.takes_value(): opts.append(' %s' % option.metavar) return "".join(opts) def _comma_separated_values_options_callback(option, opt_str, value, parser): setattr(parser.values, option.dest, value.split(',')) def _hide_login_info(opts): opts = list(opts) for private_opt in ['-p', '--password', '-u', '--username', '--video-password']: try: i = opts.index(private_opt) opts[i+1] = '<PRIVATE>' except ValueError: pass return opts max_width = 80 max_help_position = 80 # No need to wrap help messages if we're on a wide console columns = get_term_width() if columns: max_width = columns fmt = optparse.IndentedHelpFormatter(width=max_width, max_help_position=max_help_position) fmt.format_option_strings = _format_option_string kw = { 'version' : __version__, 'formatter' : fmt, 'usage' : '%prog [options] url [url...]', 'conflict_handler' : 'resolve', } parser = optparse.OptionParser(**kw) # option groups general = optparse.OptionGroup(parser, 'General Options') selection = optparse.OptionGroup(parser, 'Video Selection') authentication = optparse.OptionGroup(parser, 'Authentication Options') video_format = optparse.OptionGroup(parser, 'Video Format Options') subtitles = optparse.OptionGroup(parser, 'Subtitle Options') downloader = optparse.OptionGroup(parser, 'Download Options') postproc = optparse.OptionGroup(parser, 'Post-processing Options') filesystem = optparse.OptionGroup(parser, 'Filesystem Options') workarounds = optparse.OptionGroup(parser, 'Workarounds') verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options') general.add_option('-h', '--help', action='help', help='print this help text and exit') general.add_option('-v', '--version', action='version', help='print program version and exit') general.add_option('-U', '--update', action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)') general.add_option('-i', '--ignore-errors', action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False) general.add_option('--abort-on-error', action='store_false', dest='ignoreerrors', help='Abort downloading of further videos (in the playlist or the command line) if an error occurs') general.add_option('--dump-user-agent', action='store_true', dest='dump_user_agent', help='display the current browser identification', default=False) general.add_option('--list-extractors', action='store_true', dest='list_extractors', help='List all supported extractors and the URLs they would handle', default=False) general.add_option('--extractor-descriptions', action='store_true', dest='list_extractor_descriptions', help='Output descriptions of all supported extractors', default=False) general.add_option( '--proxy', dest='proxy', default=None, metavar='URL', help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection') general.add_option( '--socket-timeout', dest='socket_timeout', type=float, default=None, help=u'Time to wait before giving up, in seconds') general.add_option( '--default-search', dest='default_search', metavar='PREFIX', help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. 
The default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.') general.add_option( '--ignore-config', action='store_true', help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)') selection.add_option( '--playlist-start', dest='playliststart', metavar='NUMBER', default=1, type=int, help='playlist video to start at (default is %default)') selection.add_option( '--playlist-end', dest='playlistend', metavar='NUMBER', default=None, type=int, help='playlist video to end at (default is last)') selection.add_option('--match-title', dest='matchtitle', metavar='REGEX',help='download only matching titles (regex or caseless sub-string)') selection.add_option('--reject-title', dest='rejecttitle', metavar='REGEX',help='skip download for matching titles (regex or caseless sub-string)') selection.add_option('--max-downloads', metavar='NUMBER', dest='max_downloads', type=int, default=None, help='Abort after downloading NUMBER files') selection.add_option('--min-filesize', metavar='SIZE', dest='min_filesize', help="Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)", default=None) selection.add_option('--max-filesize', metavar='SIZE', dest='max_filesize', help="Do not download any videos larger than SIZE (e.g. 50k or 44.6m)", default=None) selection.add_option('--date', metavar='DATE', dest='date', help='download only videos uploaded in this date', default=None) selection.add_option( '--datebefore', metavar='DATE', dest='datebefore', default=None, help='download only videos uploaded on or before this date (i.e. inclusive)') selection.add_option( '--dateafter', metavar='DATE', dest='dateafter', default=None, help='download only videos uploaded on or after this date (i.e. inclusive)') selection.add_option( '--min-views', metavar='COUNT', dest='min_views', default=None, type=int, help="Do not download any videos with less than COUNT views",) selection.add_option( '--max-views', metavar='COUNT', dest='max_views', default=None, type=int, help="Do not download any videos with more than COUNT views",) selection.add_option('--no-playlist', action='store_true', dest='noplaylist', help='download only the currently playing video', default=False) selection.add_option('--age-limit', metavar='YEARS', dest='age_limit', help='download only videos suitable for the given age', default=None, type=int) selection.add_option('--download-archive', metavar='FILE', dest='download_archive', help='Download only videos not listed in the archive file. 
Record the IDs of all downloaded videos in it.')
    selection.add_option(
        '--include-ads', dest='include_ads',
        action='store_true', help='Download advertisements as well (experimental)')
    selection.add_option(
        '--youtube-include-dash-manifest', action='store_true',
        dest='youtube_include_dash_manifest', default=False,
        help='Try to download the DASH manifest on YouTube videos (experimental)')

    authentication.add_option('-u', '--username',
            dest='username', metavar='USERNAME', help='account username')
    authentication.add_option('-p', '--password',
            dest='password', metavar='PASSWORD', help='account password')
    authentication.add_option('-n', '--netrc',
            action='store_true', dest='usenetrc', help='use .netrc authentication data', default=False)
    authentication.add_option('--video-password',
            dest='videopassword', metavar='PASSWORD', help='video password (vimeo, smotri)')

    video_format.add_option('-f', '--format',
            action='store', dest='format', metavar='FORMAT', default=None,
            help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
    video_format.add_option('--all-formats',
            action='store_const', dest='format', help='download all available video formats', const='all')
    video_format.add_option('--prefer-free-formats',
            action='store_true', dest='prefer_free_formats', default=False,
            help='prefer free video formats unless a specific one is requested')
    video_format.add_option('--max-quality',
            action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
    video_format.add_option('-F', '--list-formats',
            action='store_true', dest='listformats', help='list all available formats')

    subtitles.add_option('--write-sub', '--write-srt',
            action='store_true', dest='writesubtitles',
            help='write subtitle file', default=False)
    subtitles.add_option('--write-auto-sub', '--write-automatic-sub',
            action='store_true', dest='writeautomaticsub',
            help='write automatic subtitle file (youtube only)', default=False)
    subtitles.add_option('--all-subs',
            action='store_true', dest='allsubtitles',
            help='downloads all the available subtitles of the video', default=False)
    subtitles.add_option('--list-subs',
            action='store_true', dest='listsubtitles',
            help='lists all available subtitles for the video', default=False)
    subtitles.add_option('--sub-format',
            action='store', dest='subtitlesformat', metavar='FORMAT',
            help='subtitle format (default=srt) ([sbv/vtt] youtube only)', default='srt')
    subtitles.add_option('--sub-lang', '--sub-langs', '--srt-lang',
            action='callback', dest='subtitleslangs', metavar='LANGS', type='str',
            default=[], callback=_comma_separated_values_options_callback,
            help='languages of the subtitles to download (optional) separated by commas, use IETF language tags like \'en,pt\'')

    downloader.add_option('-r', '--rate-limit',
            dest='ratelimit', metavar='LIMIT', help='maximum download rate in bytes per second (e.g. 50K or 4.2M)')
    downloader.add_option('-R', '--retries',
            dest='retries', metavar='RETRIES', help='number of retries (default is %default)', default=10)
    downloader.add_option('--buffer-size',
            dest='buffersize', metavar='SIZE', help='size of download buffer (e.g. 1024 or 16K) (default is %default)', default="1024")
    downloader.add_option('--no-resize-buffer',
            action='store_true', dest='noresizebuffer',
            help='do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.', default=False)
    downloader.add_option('--test', action='store_true', dest='test', default=False, help=optparse.SUPPRESS_HELP)

    workarounds.add_option(
        '--encoding', dest='encoding', metavar='ENCODING',
        help='Force the specified encoding (experimental)')
    workarounds.add_option(
        '--no-check-certificate', action='store_true',
        dest='no_check_certificate', default=False,
        help='Suppress HTTPS certificate validation.')
    workarounds.add_option(
        '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
    workarounds.add_option(
        '--user-agent', metavar='UA',
        dest='user_agent', help='specify a custom user agent')
    workarounds.add_option(
        '--referer', metavar='REF',
        dest='referer', default=None,
        help='specify a custom referer, use if the video access is restricted to one domain',
    )
    workarounds.add_option(
        '--add-header', metavar='FIELD:VALUE',
        dest='headers', action='append',
        help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
    )
    workarounds.add_option(
        '--bidi-workaround', dest='bidi_workaround', action='store_true',
        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')

    verbosity.add_option('-q', '--quiet',
            action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option(
        '--no-warnings',
        dest='no_warnings', action='store_true', default=False,
        help='Ignore warnings')
    verbosity.add_option('-s', '--simulate',
            action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
    verbosity.add_option('--skip-download',
            action='store_true', dest='skip_download', help='do not download the video', default=False)
    verbosity.add_option('-g', '--get-url',
            action='store_true', dest='geturl', help='simulate, quiet but print URL', default=False)
    verbosity.add_option('-e', '--get-title',
            action='store_true', dest='gettitle', help='simulate, quiet but print title', default=False)
    verbosity.add_option('--get-id',
            action='store_true', dest='getid', help='simulate, quiet but print id', default=False)
    verbosity.add_option('--get-thumbnail',
            action='store_true', dest='getthumbnail',
            help='simulate, quiet but print thumbnail URL', default=False)
    verbosity.add_option('--get-description',
            action='store_true', dest='getdescription',
            help='simulate, quiet but print video description', default=False)
    verbosity.add_option('--get-duration',
            action='store_true', dest='getduration',
            help='simulate, quiet but print video length', default=False)
    verbosity.add_option('--get-filename',
            action='store_true', dest='getfilename',
            help='simulate, quiet but print output filename', default=False)
    verbosity.add_option('--get-format',
            action='store_true', dest='getformat',
            help='simulate, quiet but print output format', default=False)
    verbosity.add_option('-j', '--dump-json',
            action='store_true', dest='dumpjson',
            help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
    verbosity.add_option('--newline',
            action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
    verbosity.add_option('--no-progress',
            action='store_true', dest='noprogress', help='do not print progress bar', default=False)
    verbosity.add_option('--console-title',
            action='store_true', dest='consoletitle',
            help='display progress in console titlebar', default=False)
    verbosity.add_option('-v', '--verbose',
            action='store_true', dest='verbose', help='print various debugging information', default=False)
    verbosity.add_option('--dump-intermediate-pages',
            action='store_true', dest='dump_intermediate_pages', default=False,
            help='print downloaded pages to debug problems (very verbose)')
    verbosity.add_option('--write-pages',
            action='store_true', dest='write_pages', default=False,
            help='Write downloaded intermediary pages to files in the current directory to debug problems')
    verbosity.add_option('--youtube-print-sig-code',
            action='store_true', dest='youtube_print_sig_code', default=False,
            help=optparse.SUPPRESS_HELP)
    verbosity.add_option('--print-traffic',
            dest='debug_printtraffic', action='store_true', default=False,
            help='Display sent and read HTTP traffic')

    filesystem.add_option('-a', '--batch-file',
            dest='batchfile', metavar='FILE', help='file containing URLs to download (\'-\' for stdin)')
    filesystem.add_option('--id',
            action='store_true', dest='useid', help='use only video ID in file name', default=False)
    filesystem.add_option('-A', '--auto-number',
            action='store_true', dest='autonumber',
            help='number downloaded files starting from 00000', default=False)
    filesystem.add_option('-o', '--output',
            dest='outtmpl', metavar='TEMPLATE',
            help=('output filename template. Use %(title)s to get the title, '
                  '%(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, '
                  '%(autonumber)s to get an automatically incremented number, '
                  '%(ext)s for the filename extension, '
                  '%(format)s for the format description (like "22 - 1280x720" or "HD"), '
                  '%(format_id)s for the unique id of the format (like Youtube\'s itags: "137"), '
                  '%(upload_date)s for the upload date (YYYYMMDD), '
                  '%(extractor)s for the provider (youtube, metacafe, etc), '
                  '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
                  '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
                  '%(height)s and %(width)s for the width and height of the video format. '
                  '%(resolution)s for a textual description of the resolution of the video format. '
                  'Use - to output to stdout. Can also be used to download to a different directory, '
                  'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
    filesystem.add_option('--autonumber-size',
            dest='autonumber_size', metavar='NUMBER',
            help='Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given')
    filesystem.add_option('--restrict-filenames',
            action='store_true', dest='restrictfilenames',
            help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)
    filesystem.add_option('-t', '--title',
            action='store_true', dest='usetitle', help='[deprecated] use title in file name (default)', default=False)
    filesystem.add_option('-l', '--literal',
            action='store_true', dest='usetitle', help='[deprecated] alias of --title', default=False)
    filesystem.add_option('-w', '--no-overwrites',
            action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
    filesystem.add_option('-c', '--continue',
            action='store_true', dest='continue_dl',
            help='force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.', default=True)
    filesystem.add_option('--no-continue',
            action='store_false', dest='continue_dl',
            help='do not resume partially downloaded files (restart from beginning)')
    filesystem.add_option('--no-part',
            action='store_true', dest='nopart', help='do not use .part files', default=False)
    filesystem.add_option('--no-mtime',
            action='store_false', dest='updatetime',
            help='do not use the Last-modified header to set the file modification time', default=True)
    filesystem.add_option('--write-description',
            action='store_true', dest='writedescription',
            help='write video description to a .description file', default=False)
    filesystem.add_option('--write-info-json',
            action='store_true', dest='writeinfojson',
            help='write video metadata to a .info.json file', default=False)
    filesystem.add_option('--write-annotations',
            action='store_true', dest='writeannotations',
            help='write video annotations to a .annotation file', default=False)
    filesystem.add_option('--write-thumbnail',
            action='store_true', dest='writethumbnail',
            help='write thumbnail image to disk', default=False)
    filesystem.add_option('--load-info',
            dest='load_info_filename', metavar='FILE',
            help='json file containing the video information (created with the "--write-json" option)')
    filesystem.add_option('--cookies',
            dest='cookiefile', metavar='FILE', help='file to read cookies from and dump cookie jar in')
    filesystem.add_option(
        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
    filesystem.add_option(
        '--no-cache-dir', action='store_const', const=None, dest='cachedir',
        help='Disable filesystem caching')
    filesystem.add_option(
        '--rm-cache-dir', action='store_true', dest='rm_cachedir',
        help='Delete all filesystem cache files')

    postproc.add_option('-x', '--extract-audio', action='store_true', dest='extractaudio', default=False,
            help='convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)')
    postproc.add_option('--audio-format', metavar='FORMAT', dest='audioformat', default='best',
            help='"best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; best by default')
    postproc.add_option('--audio-quality', metavar='QUALITY', dest='audioquality', default='5',
            help='ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K (default 5)')
    postproc.add_option('--recode-video', metavar='FORMAT', dest='recodevideo', default=None,
            help='Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)')
    postproc.add_option('-k', '--keep-video', action='store_true', dest='keepvideo', default=False,
            help='keeps the video file on disk after the post-processing; the video is erased by default')
    postproc.add_option('--no-post-overwrites', action='store_true', dest='nopostoverwrites', default=False,
            help='do not overwrite post-processed files; the post-processed files are overwritten by default')
    postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
            help='embed subtitles in the video (only for mp4 videos)')
    postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
            help='embed thumbnail in the audio as cover art')
    postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
            help='write metadata to the video file')
    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
            help='write metadata to the video file\'s xattrs (using dublin core and xdg standards)')
    postproc.add_option('--prefer-avconv', action='store_false', dest='prefer_ffmpeg',
            help='Prefer avconv over ffmpeg for running the postprocessors (default)')
    postproc.add_option('--prefer-ffmpeg', action='store_true', dest='prefer_ffmpeg',
            help='Prefer ffmpeg over avconv for running the postprocessors')

    parser.add_option_group(general)
    parser.add_option_group(selection)
    parser.add_option_group(downloader)
    parser.add_option_group(filesystem)
    parser.add_option_group(verbosity)
    parser.add_option_group(workarounds)
    parser.add_option_group(video_format)
    parser.add_option_group(subtitles)
    parser.add_option_group(authentication)
    parser.add_option_group(postproc)

    if overrideArguments is not None:
        opts, args = parser.parse_args(overrideArguments)
        if opts.verbose:
            write_string(u'[debug] Override config: ' + repr(overrideArguments) + '\n')
    else:
        commandLineConf = sys.argv[1:]
        if '--ignore-config' in commandLineConf:
            systemConf = []
            userConf = []
        else:
            systemConf = _readOptions('/etc/youtube-dl.conf')
            if '--ignore-config' in systemConf:
                userConf = []
            else:
                userConf = _readUserConf()
        argv = systemConf + userConf + commandLineConf
        opts, args = parser.parse_args(argv)
        if opts.verbose:
            write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
            write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
            write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')

    return parser, opts, args


def _real_main(argv=None):
    # Compatibility fixes for Windows
    if sys.platform == 'win32':
        # https://github.com/rg3/youtube-dl/issues/820
        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)

    setproctitle(u'youtube-dl')

    parser, opts, args = parseOpts(argv)

    # Set user agent
    if opts.user_agent is not None:
        std_headers['User-Agent'] = opts.user_agent

    # Set referer
    if opts.referer is not None:
        std_headers['Referer'] = opts.referer

    # Custom HTTP headers
    if opts.headers is not None:
        for h in opts.headers:
            if h.find(':', 1) < 0:
                parser.error(u'wrong header formatting, it should be key:value, not "%s"' % h)
            # maxsplit=1, so header values that themselves contain ':' (e.g. URLs)
            # do not break the 2-tuple unpack below
            key, value = h.split(':', 1)
            if opts.verbose:
                write_string(u'[debug] Adding header from command line option %s:%s\n' % (key, value))
            std_headers[key] = value

    # Dump user agent
    if opts.dump_user_agent:
        compat_print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batch_urls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
            batch_urls = read_batch_urls(batchfd)
            if opts.verbose:
                write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
    all_urls = batch_urls + args
    all_urls = [url.strip() for url in all_urls]
    _enc = preferredencoding()
    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]

    extractors = gen_extractors()

    if opts.list_extractors:
        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
            compat_print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
            matchedUrls = [url for url in all_urls if ie.suitable(url)]
            for mu in matchedUrls:
                compat_print(u'  ' + mu)
        sys.exit(0)
    if opts.list_extractor_descriptions:
        for ie in sorted(extractors, key=lambda ie: ie.IE_NAME.lower()):
            if not ie._WORKING:
                continue
            desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
            if desc is False:
                continue
            if hasattr(ie, 'SEARCH_KEY'):
                _SEARCHES = (u'cute kittens', u'slithering pythons', u'falling cat', u'angry poodle', u'purple fish', u'running tortoise', u'sleeping bunny')
                _COUNTS = (u'', u'5', u'10', u'all')
                desc += u' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
            compat_print(desc)
        sys.exit(0)

    # Conflicting, missing and erroneous options
    if opts.usenetrc and (opts.username is not None or opts.password is not None):
        parser.error(u'using .netrc conflicts with giving username/password')
    if opts.password is not None and opts.username is None:
        parser.error(u'account username missing')
    if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
        parser.error(u'using output template conflicts with using title, video ID or auto number')
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = compat_getpass(u'Type account password and press [Return]: ')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
            parser.error(u'invalid rate limit specified')
        opts.ratelimit = numeric_limit
    if opts.min_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
        if numeric_limit is None:
            parser.error(u'invalid min_filesize specified')
        opts.min_filesize = numeric_limit
    if opts.max_filesize is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
        if numeric_limit is None:
            parser.error(u'invalid max_filesize specified')
        opts.max_filesize = numeric_limit
    if opts.retries is not None:
        try:
            opts.retries = int(opts.retries)
        except (TypeError, ValueError):
            parser.error(u'invalid retry count specified')
    if opts.buffersize is not None:
        numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
        if numeric_buffersize is None:
            parser.error(u'invalid buffer size specified')
        opts.buffersize = numeric_buffersize
    if opts.playliststart <= 0:
        raise ValueError(u'Playlist start must be positive')
    if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
        raise ValueError(u'Playlist end must be greater than playlist start')
    if opts.extractaudio:
        if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
            parser.error(u'invalid audio format specified')
    if opts.audioquality:
        opts.audioquality = opts.audioquality.strip('k').strip('K')
        if not opts.audioquality.isdigit():
            parser.error(u'invalid audio quality specified')
    if opts.recodevideo is not None:
        if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
            parser.error(u'invalid video recode format specified')
    if opts.date is not None:
        date = DateRange.day(opts.date)
    else:
        date = DateRange(opts.dateafter, opts.datebefore)
    if opts.default_search not in ('auto', 'auto_warning', 'error', 'fixup_error', None) and ':' not in opts.default_search:
        parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')

    # Do not download videos when there are audio-only formats
    if opts.extractaudio and not opts.keepvideo and opts.format is None:
        opts.format = 'bestaudio/best'

    # --all-sub automatically sets --write-sub if --write-auto-sub is not given
    # this was the old behaviour if only --all-sub was given.
    if opts.allsubtitles and (opts.writeautomaticsub == False):
        opts.writesubtitles = True

    if sys.version_info < (3,):
        # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
        if opts.outtmpl is not None:
            opts.outtmpl = opts.outtmpl.decode(preferredencoding())
    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
               or (opts.format == '-1' and opts.usetitle and u'%(title)s-%(id)s-%(format)s.%(ext)s')
               or (opts.format == '-1' and u'%(id)s-%(format)s.%(ext)s')
               or (opts.usetitle and opts.autonumber and u'%(autonumber)s-%(title)s-%(id)s.%(ext)s')
               or (opts.usetitle and u'%(title)s-%(id)s.%(ext)s')
               or (opts.useid and u'%(id)s.%(ext)s')
               or (opts.autonumber and u'%(autonumber)s-%(id)s.%(ext)s')
               or DEFAULT_OUTTMPL)
    if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
        parser.error(u'Cannot download a video and extract audio into the same'
                     u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
                     u' template'.format(outtmpl))

    any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
    download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive

    ydl_opts = {
        'usenetrc': opts.usenetrc,
        'username': opts.username,
        'password': opts.password,
        'videopassword': opts.videopassword,
        'quiet': (opts.quiet or any_printing),
        'no_warnings': opts.no_warnings,
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forceid': opts.getid,
        'forcethumbnail': opts.getthumbnail,
        'forcedescription': opts.getdescription,
        'forceduration': opts.getduration,
        'forcefilename': opts.getfilename,
        'forceformat': opts.getformat,
        'forcejson': opts.dumpjson,
        'simulate': opts.simulate,
        'skip_download': (opts.skip_download or opts.simulate or any_printing),
        'format': opts.format,
        'format_limit': opts.format_limit,
        'listformats': opts.listformats,
        'outtmpl': outtmpl,
        'autonumber_size': opts.autonumber_size,
        'restrictfilenames': opts.restrictfilenames,
        'ignoreerrors': opts.ignoreerrors,
        'ratelimit': opts.ratelimit,
        'nooverwrites': opts.nooverwrites,
        'retries': opts.retries,
        'buffersize': opts.buffersize,
        'noresizebuffer': opts.noresizebuffer,
        'continuedl': opts.continue_dl,
        'noprogress': opts.noprogress,
        'progress_with_newline': opts.progress_with_newline,
        'playliststart': opts.playliststart,
        'playlistend': opts.playlistend,
        'noplaylist': opts.noplaylist,
        'logtostderr': opts.outtmpl == '-',
        'consoletitle': opts.consoletitle,
        'nopart': opts.nopart,
        'updatetime': opts.updatetime,
        'writedescription': opts.writedescription,
        'writeannotations': opts.writeannotations,
        'writeinfojson': opts.writeinfojson,
        'writethumbnail': opts.writethumbnail,
        'writesubtitles': opts.writesubtitles,
        'writeautomaticsub': opts.writeautomaticsub,
        'allsubtitles': opts.allsubtitles,
        'listsubtitles': opts.listsubtitles,
        'subtitlesformat': opts.subtitlesformat,
        'subtitleslangs': opts.subtitleslangs,
        'matchtitle': decodeOption(opts.matchtitle),
        'rejecttitle': decodeOption(opts.rejecttitle),
        'max_downloads': opts.max_downloads,
        'prefer_free_formats': opts.prefer_free_formats,
        'verbose': opts.verbose,
        'dump_intermediate_pages': opts.dump_intermediate_pages,
        'write_pages': opts.write_pages,
        'test': opts.test,
        'keepvideo': opts.keepvideo,
        'min_filesize': opts.min_filesize,
        'max_filesize': opts.max_filesize,
        'min_views': opts.min_views,
        'max_views': opts.max_views,
        'daterange': date,
        'cachedir': opts.cachedir,
        'youtube_print_sig_code': opts.youtube_print_sig_code,
        'age_limit': opts.age_limit,
        'download_archive': download_archive_fn,
        'cookiefile': opts.cookiefile,
        'nocheckcertificate': opts.no_check_certificate,
        'prefer_insecure': opts.prefer_insecure,
        'proxy': opts.proxy,
        'socket_timeout': opts.socket_timeout,
        'bidi_workaround': opts.bidi_workaround,
        'debug_printtraffic': opts.debug_printtraffic,
        'prefer_ffmpeg': opts.prefer_ffmpeg,
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'encoding': opts.encoding,
    }

    with YoutubeDL(ydl_opts) as ydl:
        ydl.print_debug_header()
        ydl.add_default_info_extractors()

        # PostProcessors
        # Add the metadata pp first, the other pps will copy it
        if opts.addmetadata:
            ydl.add_post_processor(FFmpegMetadataPP())
        if opts.extractaudio:
            ydl.add_post_processor(FFmpegExtractAudioPP(
                preferredcodec=opts.audioformat,
                preferredquality=opts.audioquality,
                nopostoverwrites=opts.nopostoverwrites))
        if opts.recodevideo:
            ydl.add_post_processor(FFmpegVideoConvertor(preferedformat=opts.recodevideo))
        if opts.embedsubtitles:
            ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
        if opts.xattrs:
            ydl.add_post_processor(XAttrMetadataPP())
        if opts.embedthumbnail:
            if not opts.addmetadata:
                ydl.add_post_processor(FFmpegAudioFixPP())
            ydl.add_post_processor(AtomicParsleyPP())

        # Update version
        if opts.update_self:
            update_self(ydl.to_screen, opts.verbose)

        # Remove cache dir
        if opts.rm_cachedir:
            if opts.cachedir is None:
                ydl.to_screen(u'No cache dir specified (Did you combine --no-cache-dir and --rm-cache-dir?)')
            else:
                if ('.cache' not in opts.cachedir) or ('youtube-dl' not in opts.cachedir):
                    # the original left the %s placeholder unfilled; supply the directory
                    ydl.to_screen(u'Not removing directory %s - this does not look like a cache dir' % opts.cachedir)
                    retcode = 141
                else:
                    ydl.to_screen(
                        u'Removing cache dir %s .' % opts.cachedir,
                        skip_eol=True)
                    if os.path.exists(opts.cachedir):
                        ydl.to_screen(u'.', skip_eol=True)
                        shutil.rmtree(opts.cachedir)
                    ydl.to_screen(u'.')

        # Maybe do nothing
        if (len(all_urls) < 1) and (opts.load_info_filename is None):
            if not (opts.update_self or opts.rm_cachedir):
                parser.error(u'you must provide at least one URL')
            else:
                sys.exit()

        try:
            if opts.load_info_filename is not None:
                retcode = ydl.download_with_info_file(opts.load_info_filename)
            else:
                retcode = ydl.download(all_urls)
        except MaxDownloadsReached:
            ydl.to_screen(u'--max-download limit reached, aborting.')
            retcode = 101

    sys.exit(retcode)


def main(argv=None):
    try:
        _real_main(argv)
    except DownloadError:
        sys.exit(1)
    except SameFileError:
        sys.exit(u'ERROR: fixed output name but more than one file to download')
    except KeyboardInterrupt:
        sys.exit(u'\nERROR: Interrupted by user')
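# --- Illustrative appendix (not part of the original __init__.py) ---
# A minimal sketch of how the options parsed above end up driving a download:
# _real_main folds the parsed opts into the ydl_opts dict and hands it to
# YoutubeDL. The keys below mirror ones built in _real_main; the URL is a
# placeholder, not a real video id.
from youtube_dl import YoutubeDL

example_opts = {
    'format': 'best',                        # what -f/--format sets
    'outtmpl': u'%(title)s-%(id)s.%(ext)s',  # what -o/--output sets
    'continuedl': True,                      # what -c/--continue sets
    'noprogress': False,
}
with YoutubeDL(example_opts) as ydl:
    ydl.download([u'https://www.youtube.com/watch?v=EXAMPLE'])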
umkcdcrg01/ryu_openflow
refs/heads/master
pbr-1.0.1-py2.7.egg/pbr/hooks/__init__.py
101
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from pbr.hooks import backwards
from pbr.hooks import commands
from pbr.hooks import files
from pbr.hooks import metadata


def setup_hook(config):
    """Filter config parsed from a setup.cfg to inject our defaults."""
    metadata_config = metadata.MetadataConfig(config)
    metadata_config.run()
    backwards.BackwardsCompatConfig(config).run()
    commands.CommandsConfig(config).run()
    files.FilesConfig(config, metadata_config.get_name()).run()
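# --- Illustrative appendix (not part of the original module) ---
# setup_hook is normally invoked for you: a project opts in through its
# setup.py, and pbr then reads setup.cfg via the config classes above.
# A minimal consumer looks like this (sketch; the project metadata lives
# in setup.cfg, not here):
import setuptools

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True,
)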
TEAM-Gummy/android_kernel_xiaomi_aries
refs/heads/kk4.4
scripts/build-all.py
1182
#! /usr/bin/env python

# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of The Linux Foundation nor
#       the names of its contributors may be used to endorse or promote
#       products derived from this software without specific prior written
#       permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.

import errno  # used by check_build(); missing from the original imports
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys

version = 'build-all.py, version 0.01'

build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
        'ARCH': 'arm',
        'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
        'KCONFIG_NOTIMESTAMP': 'true'})
all_options = {}


def error(msg):
    sys.stderr.write("error: %s\n" % msg)


def fail(msg):
    """Fail with a user-printed message"""
    error(msg)
    sys.exit(1)


def check_kernel():
    """Ensure that PWD is a kernel directory"""
    if (not os.path.isfile('MAINTAINERS') or
            not os.path.isfile('arch/arm/mach-msm/Kconfig')):
        fail("This doesn't seem to be an MSM kernel dir")


def check_build():
    """Ensure that the build directory is present."""
    if not os.path.isdir(build_dir):
        try:
            os.makedirs(build_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST:
                pass
            else:
                raise


def update_config(file, str):
    print 'Updating %s with \'%s\'\n' % (file, str)
    defconfig = open(file, 'a')
    defconfig.write(str + '\n')
    defconfig.close()


def scan_configs():
    """Get the full list of defconfigs appropriate for this tree."""
    names = {}
    for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
        names[os.path.basename(n)[:-10]] = n
    for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
        names[os.path.basename(n)[:-10]] = n
    for n in glob.glob('arch/arm/configs/apq*_defconfig'):
        names[os.path.basename(n)[:-10]] = n
    return names


class Builder:

    def __init__(self, logname):
        self.logname = logname
        self.fd = open(logname, 'w')

    def run(self, args):
        devnull = open('/dev/null', 'r')
        proc = subprocess.Popen(args, stdin=devnull,
                env=make_env,
                bufsize=0,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT)
        count = 0
        # for line in proc.stdout:
        rawfd = proc.stdout.fileno()
        while True:
            line = os.read(rawfd, 1024)
            if not line:
                break
            self.fd.write(line)
            self.fd.flush()
            if all_options.verbose:
                sys.stdout.write(line)
                sys.stdout.flush()
            else:
                for i in range(line.count('\n')):
                    count += 1
                    if count == 64:
                        count = 0
                        print
                    sys.stdout.write('.')
                sys.stdout.flush()
        print
        result = proc.wait()
        self.fd.close()
        return result


failed_targets = []


def build(target):
    dest_dir = os.path.join(build_dir, target)
    log_name = '%s/log-%s.log' % (build_dir, target)
    print 'Building %s in %s log %s' % (target, dest_dir, log_name)
    if not os.path.isdir(dest_dir):
        os.mkdir(dest_dir)
    defconfig = 'arch/arm/configs/%s_defconfig' % target
    dotconfig = '%s/.config' % dest_dir
    savedefconfig = '%s/defconfig' % dest_dir
    shutil.copyfile(defconfig, dotconfig)

    devnull = open('/dev/null', 'r')
    subprocess.check_call(['make', 'O=%s' % dest_dir,
        '%s_defconfig' % target], env=make_env, stdin=devnull)
    devnull.close()

    if not all_options.updateconfigs:
        build = Builder(log_name)
        result = build.run(['make', 'O=%s' % dest_dir] + make_command)
        if result != 0:
            if all_options.keep_going:
                failed_targets.append(target)
                fail_or_error = error
            else:
                fail_or_error = fail
            fail_or_error("Failed to build %s, see %s" % (target, build.logname))

    # Copy the defconfig back.
    if all_options.configs or all_options.updateconfigs:
        devnull = open('/dev/null', 'r')
        subprocess.check_call(['make', 'O=%s' % dest_dir,
            'savedefconfig'], env=make_env, stdin=devnull)
        devnull.close()
        shutil.copyfile(savedefconfig, defconfig)


def build_many(allconf, targets):
    print "Building %d target(s)" % len(targets)
    for target in targets:
        if all_options.updateconfigs:
            update_config(allconf[target], all_options.updateconfigs)
        build(target)
    if failed_targets:
        fail('\n  '.join(["Failed targets:"] +
            [target for target in failed_targets]))


def main():
    global make_command

    check_kernel()
    check_build()

    configs = scan_configs()

    usage = ("""
           %prog [options] all                 -- Build all targets
           %prog [options] target target ...   -- List specific targets
           %prog [options] perf                -- Build all perf targets
           %prog [options] noperf              -- Build all non-perf targets""")
    parser = OptionParser(usage=usage, version=version)
    parser.add_option('--configs', action='store_true',
            dest='configs',
            help="Copy configs back into tree")
    parser.add_option('--list', action='store_true',
            dest='list',
            help='List available targets')
    parser.add_option('-v', '--verbose', action='store_true',
            dest='verbose',
            help='Output to stdout in addition to log file')
    parser.add_option('--oldconfig', action='store_true',
            dest='oldconfig',
            help='Only process "make oldconfig"')
    parser.add_option('--updateconfigs',
            dest='updateconfigs',
            help="Update defconfigs with provided option setting, "
                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
    parser.add_option('-j', '--jobs', type='int', dest="jobs",
            help="Number of simultaneous jobs")
    parser.add_option('-l', '--load-average', type='int',
            dest='load_average',
            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
    parser.add_option('-k', '--keep-going', action='store_true',
            dest='keep_going', default=False,
            help="Keep building other targets if a target fails")
    parser.add_option('-m', '--make-target', action='append',
            help='Build the indicated make target (default: %s)' %
                 ' '.join(make_command))

    (options, args) = parser.parse_args()
    global all_options
    all_options = options

    if options.list:
        print "Available targets:"
        for target in configs.keys():
            print "   %s" % target
        sys.exit(0)

    if options.oldconfig:
        make_command = ["oldconfig"]
    elif options.make_target:
        make_command = options.make_target

    if options.jobs:
        make_command.append("-j%d" % options.jobs)
    if options.load_average:
        make_command.append("-l%d" % options.load_average)

    if args == ['all']:
        build_many(configs, configs.keys())
    elif args == ['perf']:
        targets = []
        for t in configs.keys():
            if "perf" in t:
                targets.append(t)
        build_many(configs, targets)
    elif args == ['noperf']:
        targets = []
        for t in configs.keys():
            if "perf" not in t:
                targets.append(t)
        build_many(configs, targets)
    elif len(args) > 0:
        targets = []
        for t in args:
            if t not in configs.keys():
                parser.error("Target '%s' not one of %s" % (t, configs.keys()))
            targets.append(t)
        build_many(configs, targets)
    else:
        parser.error("Must specify a target to build, or 'all'")

if __name__ == "__main__":
    main()
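# --- Illustrative appendix (not part of the original script) ---
# Standalone sketch of the same pattern Builder.run implements above: tee a
# subprocess's combined stdout/stderr to a log file while printing cheap
# dot-progress on the console. The command and log file name are placeholders.
import os
import subprocess
import sys

proc = subprocess.Popen(['make', 'vmlinux'], stdin=open('/dev/null', 'r'),
                        bufsize=0, stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT)
log = open('build.log', 'w')
while True:
    chunk = os.read(proc.stdout.fileno(), 1024)
    if not chunk:
        break
    log.write(chunk)       # full output goes to the log
    sys.stdout.write('.')  # one dot per chunk on the console
    sys.stdout.flush()
log.close()
proc.wait()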
Multiposting/mongoengine
refs/heads/master
tests/migration/__init__.py
30
import unittest

from convert_to_new_inheritance_model import *
from decimalfield_as_float import *
from refrencefield_dbref_to_object_id import *
from turn_off_inheritance import *
from uuidfield_to_binary import *

if __name__ == '__main__':
    unittest.main()
marcosbontempo/inatelos
refs/heads/master
poky-daisy/scripts/lib/mic/plugins/imager/fs_plugin.py
1
#!/usr/bin/python -tt
#
# Copyright (c) 2011 Intel, Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.

import os
import sys

# rt_util is used in do_create() below but was not imported in the original
from mic import chroot, msger, rt_util
from mic.utils import cmdln, misc, errors, fs_related
from mic.imager import fs
from mic.conf import configmgr
from mic.plugin import pluginmgr
from mic.pluginbase import ImagerPlugin


class FsPlugin(ImagerPlugin):
    name = 'fs'

    @classmethod
    @cmdln.option("--include-src",
                  dest="include_src",
                  action="store_true",
                  default=False,
                  help="Generate a image with source rpms included")
    def do_create(self, subcmd, opts, *args):
        """${cmd_name}: create fs image

        Usage:
            ${name} ${cmd_name} <ksfile> [OPTS]

        ${cmd_option_list}
        """
        if len(args) != 1:
            raise errors.Usage("Extra arguments given")

        creatoropts = configmgr.create
        ksconf = args[0]

        if creatoropts['runtime'] == 'bootstrap':
            configmgr._ksconf = ksconf
            rt_util.bootstrap_mic()

        recording_pkgs = []
        if len(creatoropts['record_pkgs']) > 0:
            recording_pkgs = creatoropts['record_pkgs']

        if creatoropts['release'] is not None:
            if 'name' not in recording_pkgs:
                recording_pkgs.append('name')
            if 'vcs' not in recording_pkgs:
                recording_pkgs.append('vcs')

        configmgr._ksconf = ksconf

        # Called after setting configmgr._ksconf, as creatoropts['name']
        # is reset there.
        if creatoropts['release'] is not None:
            creatoropts['outdir'] = "%s/%s/images/%s/" % (creatoropts['outdir'], creatoropts['release'], creatoropts['name'])

        # try to find the pkgmgr
        pkgmgr = None
        backends = pluginmgr.get_plugins('backend')
        if 'auto' == creatoropts['pkgmgr']:
            for key in configmgr.prefer_backends:
                if key in backends:
                    pkgmgr = backends[key]
                    break
        else:
            for key in backends.keys():
                if key == creatoropts['pkgmgr']:
                    pkgmgr = backends[key]
                    break

        if not pkgmgr:
            raise errors.CreatorError("Can't find backend: %s, "
                                      "available choices: %s" % (creatoropts['pkgmgr'], ','.join(backends.keys())))

        creator = fs.FsImageCreator(creatoropts, pkgmgr)
        creator._include_src = opts.include_src

        if len(recording_pkgs) > 0:
            creator._recording_pkgs = recording_pkgs

        self.check_image_exists(creator.destdir,
                                creator.pack_to,
                                [creator.name],
                                creatoropts['release'])

        try:
            creator.check_depend_tools()
            creator.mount(None, creatoropts["cachedir"])
            creator.install()
            # Download the source packages ###private options
            if opts.include_src:
                installed_pkgs = creator.get_installed_packages()
                msger.info('--------------------------------------------------')
                msger.info('Generating the image with source rpms included ...')
                if not misc.SrcpkgsDownload(installed_pkgs, creatoropts["repomd"], creator._instroot, creatoropts["cachedir"]):
                    msger.warning("Source packages can't be downloaded")

            creator.configure(creatoropts["repomd"])
            creator.copy_kernel()
            creator.unmount()
            creator.package(creatoropts["outdir"])
            if creatoropts['release'] is not None:
                creator.release_output(ksconf, creatoropts['outdir'], creatoropts['release'])
            creator.print_outimage_info()
        except errors.CreatorError:
            raise
        finally:
            creator.cleanup()

        msger.info("Finished.")
        return 0

    @classmethod
    def do_chroot(self, target, cmd=[]):  # chroot.py parse opts&args
        try:
            if len(cmd) != 0:
                cmdline = ' '.join(cmd)
            else:
                cmdline = "/bin/bash"
            envcmd = fs_related.find_binary_inchroot("env", target)
            if envcmd:
                cmdline = "%s HOME=/root %s" % (envcmd, cmdline)
            chroot.chroot(target, None, cmdline)
        finally:
            chroot.cleanup_after_chroot("dir", None, None, None)
            return 1
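# --- Illustrative appendix (not part of the original plugin) ---
# The backend-resolution rule do_create applies, restated as a standalone
# function: 'auto' walks a preference list, anything else must match a
# registered backend name exactly. All names here are hypothetical.
def pick_backend(requested, backends, prefer_backends):
    if requested == 'auto':
        for key in prefer_backends:
            if key in backends:
                return backends[key]
    elif requested in backends:
        return backends[requested]
    raise ValueError("Can't find backend: %s, available choices: %s"
                     % (requested, ','.join(backends.keys())))

# e.g. pick_backend('auto', {'yum': yum_backend}, ['zypp', 'yum'])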
ignorabimus/micropython-c-api
refs/heads/master
micropython/py/makeqstrdata.py
29
""" Process raw qstr file and output qstr data with length, hash and data bytes. This script works with Python 2.6, 2.7, 3.3 and 3.4. """ from __future__ import print_function import re import sys # Python 2/3 compatibility: # - iterating through bytes is different # - codepoint2name lives in a different module import platform if platform.python_version_tuple()[0] == '2': bytes_cons = lambda val, enc=None: bytearray(val) from htmlentitydefs import codepoint2name elif platform.python_version_tuple()[0] == '3': bytes_cons = bytes from html.entities import codepoint2name # end compatibility code codepoint2name[ord('-')] = 'hyphen'; # add some custom names to map characters that aren't in HTML codepoint2name[ord(' ')] = 'space' codepoint2name[ord('\'')] = 'squot' codepoint2name[ord(',')] = 'comma' codepoint2name[ord('.')] = 'dot' codepoint2name[ord(':')] = 'colon' codepoint2name[ord(';')] = 'semicolon' codepoint2name[ord('/')] = 'slash' codepoint2name[ord('%')] = 'percent' codepoint2name[ord('#')] = 'hash' codepoint2name[ord('(')] = 'paren_open' codepoint2name[ord(')')] = 'paren_close' codepoint2name[ord('[')] = 'bracket_open' codepoint2name[ord(']')] = 'bracket_close' codepoint2name[ord('{')] = 'brace_open' codepoint2name[ord('}')] = 'brace_close' codepoint2name[ord('*')] = 'star' codepoint2name[ord('!')] = 'bang' codepoint2name[ord('\\')] = 'backslash' codepoint2name[ord('+')] = 'plus' codepoint2name[ord('$')] = 'dollar' codepoint2name[ord('=')] = 'equals' codepoint2name[ord('?')] = 'question' codepoint2name[ord('@')] = 'at_sign' codepoint2name[ord('^')] = 'caret' codepoint2name[ord('|')] = 'pipe' codepoint2name[ord('~')] = 'tilde' # this must match the equivalent function in qstr.c def compute_hash(qstr, bytes_hash): hash = 5381 for b in qstr: hash = (hash * 33) ^ b # Make sure that valid hash is never zero, zero means "hash not computed" return (hash & ((1 << (8 * bytes_hash)) - 1)) or 1 def qstr_escape(qst): def esc_char(m): c = ord(m.group(0)) try: name = codepoint2name[c] except KeyError: name = '0x%02x' % c return "_" + name + '_' return re.sub(r'[^A-Za-z0-9_]', esc_char, qst) def parse_input_headers(infiles): # read the qstrs in from the input files qcfgs = {} qstrs = {} for infile in infiles: with open(infile, 'rt') as f: for line in f: line = line.strip() # is this a config line? match = re.match(r'^QCFG\((.+), (.+)\)', line) if match: value = match.group(2) if value[0] == '(' and value[-1] == ')': # strip parenthesis from config value value = value[1:-1] qcfgs[match.group(1)] = value continue # is this a QSTR line? 
match = re.match(r'^Q\((.*)\)$', line) if not match: continue # get the qstr value qstr = match.group(1) # special case to specify control characters if qstr == '\\n': qstr = '\n' # work out the corresponding qstr name ident = qstr_escape(qstr) # don't add duplicates if ident in qstrs: continue # add the qstr to the list, with order number to retain original order in file qstrs[ident] = (len(qstrs), ident, qstr) if not qcfgs: sys.stderr.write("ERROR: Empty preprocessor output - check for errors above\n") sys.exit(1) return qcfgs, qstrs def make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr): qbytes = bytes_cons(qstr, 'utf8') qlen = len(qbytes) qhash = compute_hash(qbytes, cfg_bytes_hash) if all(32 <= ord(c) <= 126 and c != '\\' and c != '"' for c in qstr): # qstr is all printable ASCII so render it as-is (for easier debugging) qdata = qstr else: # qstr contains non-printable codes so render entire thing as hex pairs qdata = ''.join(('\\x%02x' % b) for b in qbytes) if qlen >= (1 << (8 * cfg_bytes_len)): print('qstr is too long:', qstr) assert False qlen_str = ('\\x%02x' * cfg_bytes_len) % tuple(((qlen >> (8 * i)) & 0xff) for i in range(cfg_bytes_len)) qhash_str = ('\\x%02x' * cfg_bytes_hash) % tuple(((qhash >> (8 * i)) & 0xff) for i in range(cfg_bytes_hash)) return '(const byte*)"%s%s" "%s"' % (qhash_str, qlen_str, qdata) def print_qstr_data(qcfgs, qstrs): # get config variables cfg_bytes_len = int(qcfgs['BYTES_IN_LEN']) cfg_bytes_hash = int(qcfgs['BYTES_IN_HASH']) # print out the starter of the generated C header file print('// This file was automatically generated by makeqstrdata.py') print('') # add NULL qstr with no hash or data print('QDEF(MP_QSTR_NULL, (const byte*)"%s%s" "")' % ('\\x00' * cfg_bytes_hash, '\\x00' * cfg_bytes_len)) # go through each qstr and print it out for order, ident, qstr in sorted(qstrs.values(), key=lambda x: x[0]): qbytes = make_bytes(cfg_bytes_len, cfg_bytes_hash, qstr) print('QDEF(MP_QSTR_%s, %s)' % (ident, qbytes)) def do_work(infiles): qcfgs, qstrs = parse_input_headers(infiles) print_qstr_data(qcfgs, qstrs) if __name__ == "__main__": do_work(sys.argv[1:])
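# --- Illustrative appendix (not part of the original script) ---
# The hash above is a DJB2 xor-variant masked to the configured byte width
# and forced non-zero. A quick check of the helpers (a bytearray is used so
# iteration yields ints on both Python 2 and 3):
from makeqstrdata import compute_hash, qstr_escape

print(compute_hash(bytearray(b'print'), 2))  # a 16-bit hash, never 0
print(qstr_escape('foo/bar'))                # -> 'foo_slash_bar'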
Nexenta/s3-tests
refs/heads/master
virtualenv/lib/python2.7/site-packages/boto/cacerts/__init__.py
260
# Copyright 2010 Google Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
rob356/SickRage
refs/heads/master
lib/bs4/builder/_htmlparser.py
412
"""Use the HTMLParser library to parse HTML files that aren't too bad.""" __all__ = [ 'HTMLParserTreeBuilder', ] from HTMLParser import ( HTMLParser, HTMLParseError, ) import sys import warnings # Starting in Python 3.2, the HTMLParser constructor takes a 'strict' # argument, which we'd like to set to False. Unfortunately, # http://bugs.python.org/issue13273 makes strict=True a better bet # before Python 3.2.3. # # At the end of this file, we monkeypatch HTMLParser so that # strict=True works well on Python 3.2.2. major, minor, release = sys.version_info[:3] CONSTRUCTOR_TAKES_STRICT = ( major > 3 or (major == 3 and minor > 2) or (major == 3 and minor == 2 and release >= 3)) from bs4.element import ( CData, Comment, Declaration, Doctype, ProcessingInstruction, ) from bs4.dammit import EntitySubstitution, UnicodeDammit from bs4.builder import ( HTML, HTMLTreeBuilder, STRICT, ) HTMLPARSER = 'html.parser' class BeautifulSoupHTMLParser(HTMLParser): def handle_starttag(self, name, attrs): # XXX namespace attr_dict = {} for key, value in attrs: # Change None attribute values to the empty string # for consistency with the other tree builders. if value is None: value = '' attr_dict[key] = value attrvalue = '""' self.soup.handle_starttag(name, None, None, attr_dict) def handle_endtag(self, name): self.soup.handle_endtag(name) def handle_data(self, data): self.soup.handle_data(data) def handle_charref(self, name): # XXX workaround for a bug in HTMLParser. Remove this once # it's fixed. if name.startswith('x'): real_name = int(name.lstrip('x'), 16) elif name.startswith('X'): real_name = int(name.lstrip('X'), 16) else: real_name = int(name) try: data = unichr(real_name) except (ValueError, OverflowError), e: data = u"\N{REPLACEMENT CHARACTER}" self.handle_data(data) def handle_entityref(self, name): character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name) if character is not None: data = character else: data = "&%s;" % name self.handle_data(data) def handle_comment(self, data): self.soup.endData() self.soup.handle_data(data) self.soup.endData(Comment) def handle_decl(self, data): self.soup.endData() if data.startswith("DOCTYPE "): data = data[len("DOCTYPE "):] elif data == 'DOCTYPE': # i.e. "<!DOCTYPE>" data = '' self.soup.handle_data(data) self.soup.endData(Doctype) def unknown_decl(self, data): if data.upper().startswith('CDATA['): cls = CData data = data[len('CDATA['):] else: cls = Declaration self.soup.endData() self.soup.handle_data(data) self.soup.endData(cls) def handle_pi(self, data): self.soup.endData() if data.endswith("?") and data.lower().startswith("xml"): # "An XHTML processing instruction using the trailing '?' # will cause the '?' to be included in data." - HTMLParser # docs. # # Strip the question mark so we don't end up with two # question marks. data = data[:-1] self.soup.handle_data(data) self.soup.endData(ProcessingInstruction) class HTMLParserTreeBuilder(HTMLTreeBuilder): is_xml = False features = [HTML, STRICT, HTMLPARSER] def __init__(self, *args, **kwargs): if CONSTRUCTOR_TAKES_STRICT: kwargs['strict'] = False self.parser_args = (args, kwargs) def prepare_markup(self, markup, user_specified_encoding=None, document_declared_encoding=None): """ :return: A 4-tuple (markup, original encoding, encoding declared within markup, whether any characters had to be replaced with REPLACEMENT CHARACTER). 
""" if isinstance(markup, unicode): yield (markup, None, None, False) return try_encodings = [user_specified_encoding, document_declared_encoding] dammit = UnicodeDammit(markup, try_encodings, is_html=True) yield (dammit.markup, dammit.original_encoding, dammit.declared_html_encoding, dammit.contains_replacement_characters) def feed(self, markup): args, kwargs = self.parser_args parser = BeautifulSoupHTMLParser(*args, **kwargs) parser.soup = self.soup try: parser.feed(markup) except HTMLParseError, e: warnings.warn(RuntimeWarning( "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) raise e # Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some # 3.2.3 code. This ensures they don't treat markup like <p></p> as a # string. # # XXX This code can be removed once most Python 3 users are on 3.2.3. if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: import re attrfind_tolerant = re.compile( r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant locatestarttagend = re.compile(r""" <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name (?:\s+ # whitespace before attribute name (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name (?:\s*=\s* # value indicator (?:'[^']*' # LITA-enclosed value |\"[^\"]*\" # LIT-enclosed value |[^'\">\s]+ # bare value ) )? ) )* \s* # trailing whitespace """, re.VERBOSE) BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend from html.parser import tagfind, attrfind def parse_starttag(self, i): self.__starttag_text = None endpos = self.check_for_whole_start_tag(i) if endpos < 0: return endpos rawdata = self.rawdata self.__starttag_text = rawdata[i:endpos] # Now parse the data between i+1 and j into a tag and attrs attrs = [] match = tagfind.match(rawdata, i+1) assert match, 'unexpected call to parse_starttag()' k = match.end() self.lasttag = tag = rawdata[i+1:k].lower() while k < endpos: if self.strict: m = attrfind.match(rawdata, k) else: m = attrfind_tolerant.match(rawdata, k) if not m: break attrname, rest, attrvalue = m.group(1, 2, 3) if not rest: attrvalue = None elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: attrvalue = attrvalue[1:-1] if attrvalue: attrvalue = self.unescape(attrvalue) attrs.append((attrname.lower(), attrvalue)) k = m.end() end = rawdata[k:endpos].strip() if end not in (">", "/>"): lineno, offset = self.getpos() if "\n" in self.__starttag_text: lineno = lineno + self.__starttag_text.count("\n") offset = len(self.__starttag_text) \ - self.__starttag_text.rfind("\n") else: offset = offset + len(self.__starttag_text) if self.strict: self.error("junk characters in start tag: %r" % (rawdata[k:endpos][:20],)) self.handle_data(rawdata[i:endpos]) return endpos if end.endswith('/>'): # XHTML-style empty tag: <span attr="value" /> self.handle_startendtag(tag, attrs) else: self.handle_starttag(tag, attrs) if tag in self.CDATA_CONTENT_ELEMENTS: self.set_cdata_mode(tag) return endpos def set_cdata_mode(self, elem): self.cdata_elem = elem.lower() self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I) BeautifulSoupHTMLParser.parse_starttag = parse_starttag BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode CONSTRUCTOR_TAKES_STRICT = True
Simran-B/arangodb
refs/heads/docs_3.0
3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/test/test_win32event.py
17
import unittest
import win32event
import time
import os
import sys


class TestWaitableTimer(unittest.TestCase):
    def testWaitableFire(self):
        h = win32event.CreateWaitableTimer(None, 0, None)
        # negative due time is relative, in 100-nanosecond units:
        # -160 means 16 microseconds from now
        dt = -160L
        win32event.SetWaitableTimer(h, dt, 0, None, None, 0)
        rc = win32event.WaitForSingleObject(h, 1000)
        self.failUnlessEqual(rc, win32event.WAIT_OBJECT_0)

    def testWaitableTrigger(self):
        h = win32event.CreateWaitableTimer(None, 0, None)
        # for the sake of this, pass a long that doesn't fit in an int.
        dt = -2000000000L
        win32event.SetWaitableTimer(h, dt, 0, None, None, 0)
        rc = win32event.WaitForSingleObject(h, 10)  # 10 ms.
        self.failUnlessEqual(rc, win32event.WAIT_TIMEOUT)


if __name__ == '__main__':
    unittest.main()
bitsgalore/omSipCreator
refs/heads/master
omSipCreator/cdinfo.py
2
#! /usr/bin/env python
"""Wrapper module for reading and parsing cd-info output"""

import io
from lxml import etree
from . import shared
from . import config


def parseCDInfoLog(fileCDInfo):
    """Determine carrier type and number of sessions on carrier"""

    # Create cd-info element
    cdInfoName = etree.QName(config.cdInfo_ns, "cd-info")
    cdInfoElt = etree.Element(cdInfoName, nsmap=config.NSMAP)

    # Add trackList and analysisReport elements
    trackListElt = etree.SubElement(cdInfoElt, "{%s}trackList" % (config.cdInfo_ns))
    analysisReportElt = etree.SubElement(cdInfoElt, "{%s}analysisReport" % (config.cdInfo_ns))

    # Open cd-info log file and read to list
    outAsList = []
    with io.open(fileCDInfo, "r", encoding="utf-8") as fCdInfoLogFile:
        for line in fCdInfoLogFile:
            line = line.strip()
            outAsList.append(line)
    fCdInfoLogFile.close()

    # Set up list and empty string for storing analysis report
    analysisReport = []
    analysisReportString = ''

    # Initialise variable that reports LSN of data track
    dataTrackLSNStart = 0

    # Locate track list and analysis report in cd-info output
    startIndexTrackList = shared.index_startswith_substring(outAsList, "CD-ROM Track List")
    startIndexAnalysisReport = shared.index_startswith_substring(outAsList, "CD Analysis Report")

    # Parse track list and store interesting bits in dictionary
    for i in range(startIndexTrackList + 2, startIndexAnalysisReport - 1, 1):
        thisTrack = outAsList[i]
        if not thisTrack.startswith("++"):  # This gets rid of warning messages, do we want that?
            thisTrack = thisTrack.split(": ")
            trackNumber = int(thisTrack[0].strip())
            trackDetails = thisTrack[1].split()
            trackMSFStart = trackDetails[0]  # Minute:Second:Frame
            trackLSNStart = trackDetails[1]  # Logical Sector Number
            trackType = trackDetails[2]  # Track type: audio / data
            trackGreen = trackDetails[3]  # Don't know what this means
            trackCopy = trackDetails[4]  # Don't know what this means
            if trackType == 'audio':
                trackChannels = trackDetails[5]
                trackPreemphasis = trackDetails[6]
            if trackType == 'data':
                dataTrackLSNStart = int(trackLSNStart)

            # Append properties to trackList
            trackElt = etree.SubElement(trackListElt, "{%s}track" % (config.cdInfo_ns))
            trackNumberElt = etree.SubElement(trackElt, "{%s}trackNumber" % (config.cdInfo_ns))
            trackNumberElt.text = str(trackNumber)
            MSFElt = etree.SubElement(trackElt, "{%s}MSF" % (config.cdInfo_ns))
            MSFElt.text = trackMSFStart
            LSNElt = etree.SubElement(trackElt, "{%s}LSN" % (config.cdInfo_ns))
            LSNElt.text = str(trackLSNStart)
            TypeElt = etree.SubElement(trackElt, "{%s}Type" % (config.cdInfo_ns))
            TypeElt.text = trackType
            if trackType != 'leadout':
                GreenElt = etree.SubElement(trackElt, "{%s}Green" % (config.cdInfo_ns))
                GreenElt.text = trackGreen
                CopyElt = etree.SubElement(trackElt, "{%s}Copy" % (config.cdInfo_ns))
                CopyElt.text = trackCopy
            if trackType == 'audio':
                ChannelsElt = etree.SubElement(trackElt, "{%s}Channels" % (config.cdInfo_ns))
                ChannelsElt.text = trackChannels
                PreemphasisElt = etree.SubElement(trackElt, "{%s}Preemphasis" % (config.cdInfo_ns))
                PreemphasisElt.text = trackPreemphasis

    # Parse analysis report
    for i in range(startIndexAnalysisReport + 1, len(outAsList), 1):
        thisLine = outAsList[i]
        analysisReport.append(thisLine)
        analysisReportString = analysisReportString + thisLine + "\n"

    # Flags for CD/Extra / multisession / mixed-mode
    # Note that single-session mixed mode CDs are erroneously reported as
    # multisession by libcdio. See: http://savannah.gnu.org/bugs/?49090#comment1
    cdExtra = shared.index_startswith_substring(analysisReport, "CD-Plus/Extra") != -1
    multiSession = shared.index_startswith_substring(analysisReport, "session #") != -1
    mixedMode = shared.index_startswith_substring(analysisReport, "mixed mode CD") != -1

    # Add individual parsed values from analysis report to separate subelements
    cdExtraElt = etree.SubElement(analysisReportElt, "{%s}cdExtra" % (config.cdInfo_ns))
    cdExtraElt.text = str(cdExtra)
    multiSessionElt = etree.SubElement(analysisReportElt, "{%s}multiSession" % (config.cdInfo_ns))
    multiSessionElt.text = str(multiSession)
    mixedModeElt = etree.SubElement(analysisReportElt, "{%s}mixedMode" % (config.cdInfo_ns))
    mixedModeElt.text = str(mixedMode)

    # Add unformatted analysis report to analysisReportFullElt element
    analysisReportFullElt = etree.SubElement(analysisReportElt, "{%s}fullReport" % (config.cdInfo_ns))
    analysisReportFullElt.text = analysisReportString

    return cdInfoElt, dataTrackLSNStart
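# --- Illustrative appendix (not part of the original module) ---
# Hedged usage sketch: run the parser over a saved cd-info log and serialise
# the element tree it builds; the log file name here is hypothetical.
from lxml import etree
from omSipCreator.cdinfo import parseCDInfoLog

cdInfoElt, dataTrackLSNStart = parseCDInfoLog('cd-info.log')
print(etree.tostring(cdInfoElt, pretty_print=True))
print('First data track starts at LSN %d' % dataTrackLSNStart)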
mcfletch/AutobahnPython
refs/heads/master
examples/twisted/wamp/basic/rpc/slowsquare/__init__.py
561
###############################################################################
##
##  Copyright (C) 2014 Tavendo GmbH
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
###############################################################################