repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable)
---|---|---|---|---|
tempbottle/h-store | refs/heads/master | tools/traces/mysql-to-json.py | 9 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import re
import json
import logging
import getopt
import string
import time
from pprint import pprint
import MySQLdb
from hstoretraces import *
logging.basicConfig(level = logging.INFO,
format="%(asctime)s [%(funcName)s:%(lineno)03d] %(levelname)-5s: %(message)s",
datefmt="%m-%d-%Y %H:%M:%S",
stream = sys.stdout)
## ==============================================
## GLOBAL CONFIGURATION PARAMETERS
## ==============================================
DB_HOST = None
DB_USER = None
DB_PASSWORD = None
DB_PORT = 3306
DB_NAME = "tpce"
DB_TABLE = "metarelcloud_general_log"
OUTPUT_FILE = "-"
OUTPUT_FD = None
## Catalog Information
CATALOG_PROCEDURES = { }
CATALOG_TABLES = { }
PROCEDURES = { }
PROCEDURE_COUNT = { }
## Procedures for which we want to print out debug statements
DEBUG_PROCEDURES = [ ]
STRICT_MATCHING = False
NESTED_SELECT_MARKER = "XYZ"
NESTED_SELECT_REGEX = re.compile(r"(?:FROM|IN)[\s]+\([\s]*(SELECT .*?)[\s]*\) (?:AS|ORDER)", re.IGNORECASE)
## If a query contains one of these keywords, then just skip it
SKIP_KEYWORDS = map(string.lower, [
"LAST_INSERT_ID",
"CONCAT(ex_desc, ",
## CustomerPosition
"FROM (%s) AS t, trade, trade_history, status_type" % NESTED_SELECT_MARKER,
## MarketWatch
"COALESCE(",
## DataMaintenance
"AND s_symb NOT IN",
"UPDATE watch_item, watch_list",
"SET ex_desc = INSERT(ex_desc",
])
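## Note: both sides of the comparison are lower-cased (the keywords above
## via string.lower, the incoming query via query.lower()), so the skip
## check performed in the main loop is case-insensitive.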
## ==============================================
## splitNestedSelect
## ==============================================
def splitNestedSelect(sql):
match = NESTED_SELECT_REGEX.search(sql)
assert match, "Failed to extract nested SELECT statement from '%s'" % sql
inner = match.group(1).strip()
outer = sql.replace(inner, NESTED_SELECT_MARKER)
logging.info("Successfully extracted nested SELECT statement")
logging.info("INNER: %s" % inner)
logging.info("OUTER: %s" % outer)
return [outer, inner]
## DEF
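## Illustrative example (hypothetical query, not taken from a real trace):
##   splitNestedSelect("SELECT * FROM (SELECT t_id FROM trade) AS t ORDER BY t_id")
##   -> ["SELECT * FROM (XYZ) AS t ORDER BY t_id",  ## outer, inner replaced by marker
##       "SELECT t_id FROM trade"]                  ## inner, re-queued for processing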
## ==============================================
## Auto-Reconnecting DB Wrapper
## ==============================================
class DB:
conn = None
def __init__(self, host, user, passwd, name, port):
self.host = host
self.user = user
self.passwd = passwd
self.name = name
self.port = int(port)
def connect(self):
self.conn = MySQLdb.connect(host=self.host, user=self.user, passwd=self.passwd, db=self.name, port=self.port)
def query(self, sql):
try:
cursor = self.conn.cursor()
cursor.execute(sql)
except (AttributeError, MySQLdb.OperationalError):
self.connect()
cursor = self.conn.cursor()
cursor.execute(sql)
return cursor
## CLASS
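## Minimal usage sketch (hypothetical credentials): the wrapper connects
## lazily on the first query() call and reconnects once if no connection
## exists yet or the server has gone away (MySQLdb.OperationalError):
##   db = DB("localhost", "user", "passwd", "tpce", 3306)
##   cursor = db.query("SELECT COUNT(*) FROM metarelcloud_general_log")
##   print cursor.fetchone()[0]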
## ==============================================
## main
## ==============================================
if __name__ == '__main__':
_options, args = getopt.gnu_getopt(sys.argv[1:], '', [
## Database Options
"host=",
"user=",
"password=",
"port=",
"name=",
"table=",
## JSON file for mapping txn params from query params
"params-map=",
## Limit the number of queries fetched per thread
"limit=",
## Trace Output File
"output=",
## Separate Output File per Thread
"output-per-thread",
## JSON Catalog File
"catalog=",
## Force Thread Id
"thread=",
## Take all threads greater than ones provided with --thread=
"thread-greater",
## When strict matching is enabled, the script will fail if a query/txn can't be matched
"strict",
## Enable debug logging
"debug",
## Enable debug logging for a specific procedure
"debug-proc=",
])
## ----------------------------------------------
## COMMAND OPTIONS
## ----------------------------------------------
options = { }
for key, value in _options:
if key.startswith("--"): key = key[2:]
if key in options:
options[key].append(value)
else:
options[key] = [ value ]
## FOR
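## Example (illustrative): invoking the script with
##   --thread=5 --thread=7 --strict
## yields options == {"thread": ["5", "7"], "strict": [""]}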
if "debug" in options: logging.getLogger().setLevel(logging.DEBUG)
if "debug-proc" in options: DEBUG_PROCEDURES = list(options["debug-proc"])
if "strict" in options: STRICT_MATCHING = True
for key in [ "host", "user", "password", "port", "name", "table" ]:
varname = "DB_" + key.upper()
if key in options:
value = options[key][0]
assert value
locals()[varname] = value
## IF
if not locals()[varname]:
logging.fatal("Missing parameter '%s'" % key)
sys.exit(1)
## FOR
## ----------------------------------------------
## Load in catalog JSON
## ----------------------------------------------
CATALOG_PROCEDURES, CATALOG_TABLES = loadCatalog(options["catalog"][0])
## Create all the StoredProcedure objects
for proc_name in CATALOG_PROCEDURES.keys():
debug = (proc_name in DEBUG_PROCEDURES)
PROCEDURES[proc_name] = StoredProcedure(proc_name, CATALOG_PROCEDURES[proc_name], CATALOG_TABLES, debug)
PROCEDURE_COUNT[proc_name] = 0
## FOR
## ----------------------------------------------
## Load in parameters mapping
## ----------------------------------------------
if "params-map" in options:
loadParameterMapping(options["params-map"][0], PROCEDURES)
## ----------------------------------------------
## Connect to DB
## ----------------------------------------------
logging.info("Connecting to %s@%s:%s" % (DB_USER, DB_HOST, DB_PORT))
db = DB(DB_HOST, DB_USER, DB_PASSWORD, DB_NAME, DB_PORT)
if not "thread" in options or "thread-greater" in options:
sql = "SELECT DISTINCT thread_id FROM %s" % DB_TABLE
if "thread-greater" in options and "thread" in options:
options["thread"] = map(int, options["thread"])
sql += " WHERE thread_id > %d" % max(options["thread"])
cursor = db.query(sql)
thread_ids = [ ]
for row in cursor.fetchall():
thread_ids.append(int(row[0]))
## FOR
else:
thread_ids = map(int, options["thread"])
## IF
logging.info("Number of threads: %d" % len(thread_ids))
## ----------------------------------------------
## Process Thread Workload Logs
## ----------------------------------------------
query_ctr = 0
for thread_id in thread_ids:
logging.info("Grabbing queries for thread #%d" % thread_id)
sql = """
SELECT event_time, command_type, argument
FROM %s
WHERE thread_id = %d
ORDER BY event_time ASC""" % (DB_TABLE, thread_id)
if "limit" in options: sql += " LIMIT %d" % int(options["limit"][0])
cursor = db.query(sql)
## ----------------------------------------------
## Open output file
## ----------------------------------------------
if "output" in options:
OUTPUT_FILE = options["output"][0]
if "output-per-thread" in options:
if OUTPUT_FD: OUTPUT_FD.close()
OUTPUT_FILE += ".%d" % thread_id
OUTPUT_FD = open(OUTPUT_FILE, "w")
elif not OUTPUT_FD:
OUTPUT_FD = open(OUTPUT_FILE, "w")
else:
OUTPUT_FILE = "stdout"
OUTPUT_FD = sys.stdout
## IF
logging.info("Writing out traces to '%s'" % OUTPUT_FILE)
ctr = -1
current_procedure = None ## StoredProcedure
current_txn = None ## TransactionTrace
need_to_fail = None
for row in cursor.fetchall():
ctr += 1
queries = [ row ]
while len(queries) > 0:
next_row = queries.pop(0)
event_time = next_row[0]
command = next_row[1]
query = next_row[2]
query_lower = query.lower()
debug_str = "[%d] %s" % (ctr, " - ".join(map(str, [command, query, event_time])))
logging.debug(debug_str)
if need_to_fail != None:
need_to_fail -= 1
if need_to_fail == 0: sys.exit(1)
continue
## ----------------------------------------------
## SKIP
## ----------------------------------------------
if len([ keyword for keyword in SKIP_KEYWORDS if query_lower.find(keyword) != -1]) > 0:
logging.info("Query [%d] contains skip keywords. Ignoring..." % ctr)
#logging.info(debug_str)
continue
## ----------------------------------------------
## SPECIAL CASE: Nested SELECT
## ----------------------------------------------
elif NESTED_SELECT_REGEX.search(query):
for query in splitNestedSelect(query):
queries.append([ event_time, command, query ])
logging.debug("Queued up extracted query from nested select: %s" % query)
## FOR
assert queries
continue
## ----------------------------------------------
## PREPARE
## ----------------------------------------------
elif command == "Prepare":
## Figure out what stored procedure they are executing
if query.find("SET TRANSACTION") != -1: continue
## Check whether this is the start of a new procedure
#logging.debug("Current Procedure: %s" % str(current_procedure))
current_procedure = findCatalogProcedureMatch(query, PROCEDURES)
if not current_procedure:
msg = "Failed to identify procedure: %s" % debug_str
if STRICT_MATCHING: assert current_procedure, msg
logging.warn(msg)
continue
## IF
assert current_procedure
current_procedure.addQuery(query)
current_procedure = None
## ----------------------------------------------
## QUERY
## ----------------------------------------------
elif command == "Query":
if query in ["commit", "rollback"]:
## If the current txn has no catalog name, then we weren't able
## to figure out which procedure it belonged to.
## We'll just ignore everything
if not current_txn.catalog_name:
logging.debug("Got commit message but we weren't able to identify the procedure. Ignoring...")
query_traces = current_txn.getQueries()
assert not query_traces, "Unknown transaction has queries\n" + "\n".join([ "[%d] %s" % (i, query_traces[i].orig_query) for i in range(len(query_traces)) ])
if UNKNOWN_PROCEDURE not in PROCEDURE_COUNT: PROCEDURE_COUNT[UNKNOWN_PROCEDURE] = 0
PROCEDURE_COUNT[UNKNOWN_PROCEDURE] += 1
else:
num_queries = len(current_txn.getQueries())
assert num_queries > 0, "Txn #%d %s does not have any queries in it" % (current_txn.txn_id, current_txn.catalog_name)
query_ctr += num_queries
logging.info("Marking txn #%d %s as completed with %d queries [thread=%d]" % (current_txn.txn_id, current_txn.catalog_name, num_queries, thread_id))
#debug_str = "\n"
#for tuple in current_txn.queries:
#debug_str += str(tuple) + "\n"
### FOR
#logging.info(debug_str)
#logging.info("-"*32)
#debug_str = "\n"
#for i in range(len(current_procedure.orig_queries)):
#debug_str += "[%d] %s\n" % (i, current_procedure.orig_queries[i])
#logging.info(debug_str)
#logging.debug("="*100)
#sys.exit(1)
### IF
PROCEDURE_COUNT[current_txn.catalog_name] += 1
## Set the final attributes for this txn
aborted = (query == "rollback")
current_txn.finish(current_procedure, event_time, aborted = aborted)
#if aborted or current_txn.catalog_name == "BrokerVolume":
#print "-"*100
#print
#print json.dumps(current_txn.toJSON(), indent=2)
#sys.exit(0)
## And then write it out to the trace file and reset ourselves
writeJSON(current_txn.toJSON(), OUTPUT_FD)
## IF
logging.debug("Reseting current txn and procedure variables")
current_txn = None
current_procedure = None
## IF
## ----------------------------------------------
## EXECUTE
## ----------------------------------------------
elif command == "Execute":
## Start of a new txn
if query.find("SET TRANSACTION") != -1:
assert not current_txn
## Create a new TransactionTrace object, even though at this
## point we don't know what procedure it is
logging.debug("Starting a new transaction. Reseting current procedure variable")
current_txn = TransactionTrace(nextTraceId(),
event_time,
False,
STRICT_MATCHING)
current_procedure = None
## Executing a query in this txn
else:
# logging.debug(debug_str)
assert current_txn, "Executing a query before the txn started: %s" % debug_str
## Figure out which stored procedure this txn belongs to if this is the first query
exclude_procs = set()
while True:
debug = False
if exclude_procs:
logging.info("Trying to re-match first query in transaction: %s [excluded=%s]" % (query, str(exclude_procs)))
debug = True
if not current_procedure:
logging.debug("Trying to match first query in transaction. Excluded = %s" % str(exclude_procs))
current_procedure = findCatalogProcedureMatch(query, PROCEDURES, exclude_procs, debug = debug)
if not current_procedure:
logging.warn("Failed to match first query for new txn: %s" % debug_str)
if STRICT_MATCHING:
query_traces = current_txn.getQueries()
assert current_procedure, "Unknown Query for '%s'\n" % (current_txn.catalog_name) + "\n".join([ "[%03d] %s" % (i, query_traces[i].orig_query) for i in range(len(query_traces)) ])
break
elif current_txn.catalog_name and current_txn.catalog_name != current_procedure.proc_name:
logging.info("Switched Transaction from %s to %s" % (current_txn.catalog_name, current_procedure.proc_name))
## Rebuild the txn
orig_txn = current_txn
current_txn = TransactionTrace(
orig_txn.id,
orig_txn.start_timestamp,
current_procedure.debug,
STRICT_MATCHING)
current_txn.catalog_name = current_procedure.proc_name
for orig_query_trace in orig_txn.getQueries():
(catalog_name, params) = current_procedure.parseQuery(orig_query_trace.orig_query)
if catalog_name == WRONG_PROCEDURE_MATCH:
logging.fatal("Conflicting Procedure queries:\n %s.%s: %s\n %s.%s: %s" % (orig_txn.name, orig_query_trace.catalog_name, orig_query_trace.orig_query, current_txn.catalog_name, "XXX", query))
sys.exit(1)
logging.info("%s.%s -> %s.%s: %s" % (orig_txn.catalog_name, orig_query_trace.catalog_name, current_txn.catalog_name, catalog_name, orig_query_trace.orig_query))
current_txn.addQuery(QueryTrace(
orig_query_trace.id,
orig_query_trace.orig_query,
catalog_name,
current_txn.catalog_name,
params,
orig_query_trace.start_timestamp))
## FOR
assert len(orig_txn.getQueries()) == len(current_txn.getQueries())
logging.info("Copied %d queries into new Transaction instance '%s'" % (len(orig_txn.getQueries()), current_txn.catalog_name))
logging.debug("Identified query as part of '%s'" % current_procedure.proc_name)
## IF
assert current_procedure
if not current_txn.catalog_name and current_procedure:
current_txn.catalog_name = current_procedure.proc_name
logging.debug("Selected current transaction procedure as '%s'" % current_txn.catalog_name)
assert current_txn.catalog_name, "Unknown Procedure: %s" % debug_str
## This will retrieve the original parameterized query and extract its
## parameters from the query that we just executed. We then need to
## take this information and figure out what query it corresponds
## with in our catalog
logging.debug("Extracting query catalog name and parameters from query in '%s'" % current_procedure)
(catalog_name, params) = current_procedure.parseQuery(query)
## If somebody up above says that we're in the wrong procedure,
## then we'll loop back around and try to correct our mistake
if catalog_name == WRONG_PROCEDURE_MATCH:
logging.info("Incorrectly labeled txn as '%s' because query doesn't match anything. Retrying: %s" % (current_txn.catalog_name, query))
current_procedure = None
exclude_procs.add(current_txn.catalog_name)
continue
## IF
current_query = QueryTrace(nextTraceId(),
query,
catalog_name,
current_txn.catalog_name,
params,
event_time)
current_txn.addQuery(current_query)
#if catalog_name == "get" and current_txn.catalog_name == "BrokerVolume":
#print "-"*60
#print current_procedure.catalog_proc[catalog_name]
#print
#print current_query.toJSON()
#sys.exit(1)
break
## WHILE
## IF
## ----------------------------------------------
## Not needed?
## ----------------------------------------------
else:
logging.debug("Unused event: %s" % debug_str)
continue
## IF
## WHILE (queries)
## FOR (cursor)
logging.info("Procedure count after thread #%d" % thread_id)
pprint(PROCEDURE_COUNT)
print "Total # of Queries:", query_ctr
if UNKNOWN_PROCEDURE in PROCEDURE_COUNT: logging.warn("Unknown procedures in thread #%d" % thread_id)
## FOR
## MAIN |
opencog/link-grammar | refs/heads/master | bindings/python-examples/example.py | 3 | #! /usr/bin/env python3
# -*- coding: utf8 -*-
"""Link Grammar example usage"""
from linkgrammar import Sentence, ParseOptions, Dictionary, Clinkgrammar as clg
print("Version:", clg.linkgrammar_get_version())
po = ParseOptions(verbosity=1)
def desc(lkg):
print(lkg.diagram())
print('Postscript:')
print(lkg.postscript())
print('---')
def s(q):
return '' if q == 1 else 's'
def linkage_stat(psent, lang, lkgs, sent_po):
"""
This function mimics the linkage status report style of link-parser
"""
random = ' of {} random linkages'. \
format(clg.sentence_num_linkages_post_processed((psent._obj))) \
if clg.sentence_num_linkages_found(psent._obj) > sent_po.linkage_limit else ''
print('{}: Found {} linkage{} ({}{} had no P.P. violations)'.
format(lang, clg.sentence_num_linkages_found(psent._obj),
s(clg.sentence_num_linkages_found(psent._obj)), len(lkgs), random))
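# Example of the line printed above (illustrative output):
#   English: Found 2 linkages (2 had no P.P. violations)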
en_lines = [
'This is a test.',
'I feel is the exciter than other things', # from issue #303 (10 linkages)
]
po = ParseOptions(min_null_count=0, max_null_count=999)
#po.linkage_limit = 3
# English is the default language
en_dir = Dictionary() # open the dictionary only once
for text in en_lines:
sent = Sentence(text, en_dir, po)
linkages = sent.parse()
linkage_stat(sent, 'English', linkages, po)
for linkage in linkages:
desc(linkage)
# Russian
sent = Sentence("Целью курса является обучение магистрантов основам построения и функционирования программного обеспечения сетей ЭВМ.", Dictionary('ru'), po)
linkages = sent.parse()
linkage_stat(sent, 'Russian', linkages, po)
for linkage in linkages:
desc(linkage)
# Turkish
po = ParseOptions(islands_ok=True, max_null_count=1, display_morphology=True, verbosity=1)
sent = Sentence("Senin ne istediğini bilmiyorum", Dictionary('tr'), po)
linkages = sent.parse()
linkage_stat(sent, 'Turkish', linkages, po)
for linkage in linkages:
desc(linkage)
# Prevent interleaving "Dictionary close" messages
po = ParseOptions(verbosity=0)
|
l5h5t7/ZeroNet | refs/heads/master | plugins/disabled-Bootstrapper/Test/TestBootstrapper.py | 3 | import hashlib
import os
import pytest
from Bootstrapper import BootstrapperPlugin
from Bootstrapper.BootstrapperDb import BootstrapperDb
from Peer import Peer
from Crypt import CryptRsa
from util import helper
@pytest.fixture()
def bootstrapper_db(request):
BootstrapperPlugin.db.close()
BootstrapperPlugin.db = BootstrapperDb()
BootstrapperPlugin.db.createTables() # Reset db
BootstrapperPlugin.db.cur.logging = True
def cleanup():
BootstrapperPlugin.db.close()
os.unlink(BootstrapperPlugin.db.db_path)
request.addfinalizer(cleanup)
return BootstrapperPlugin.db
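# The "announce" requests below exercise the tracker protocol: the peer
# sends site hashes and its port, asks for peer address types listed in
# "need_types" (up to "need_num" per hash), and "add" names the address
# types the tracker should record for the announcer. The response holds
# one res["peers"] entry per requested hash, in the same order.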
@pytest.mark.usefixtures("resetSettings")
class TestBootstrapper:
def testIp4(self, file_server, bootstrapper_db):
peer = Peer("127.0.0.1", 1544, connection_server=file_server)
hash1 = hashlib.sha256("site1").digest()
hash2 = hashlib.sha256("site2").digest()
hash3 = hashlib.sha256("site3").digest()
# Verify empty result
res = peer.request("announce", {
"hashes": [hash1, hash2],
"port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
})
assert len(res["peers"][0]["ip4"]) == 0 # Empty result
# Verify added peer on previous request
bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1, hash2], delete_missing_hashes=True)
res = peer.request("announce", {
"hashes": [hash1, hash2],
"port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
})
assert len(res["peers"][0]["ip4"]) == 1
assert len(res["peers"][1]["ip4"]) == 1
# hash2 deleted from 1.2.3.4
bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1], delete_missing_hashes=True)
res = peer.request("announce", {
"hashes": [hash1, hash2],
"port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
})
assert len(res["peers"][0]["ip4"]) == 1
assert len(res["peers"][1]["ip4"]) == 0
# Announce 3 hash again
bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=15441, hashes=[hash1, hash2, hash3], delete_missing_hashes=True)
res = peer.request("announce", {
"hashes": [hash1, hash2, hash3],
"port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
})
assert len(res["peers"][0]["ip4"]) == 1
assert len(res["peers"][1]["ip4"]) == 1
assert len(res["peers"][2]["ip4"]) == 1
# Single hash announce
res = peer.request("announce", {
"hashes": [hash1], "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": ["ip4"]
})
assert len(res["peers"][0]["ip4"]) == 1
# Test DB cleanup
assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 1 # 127.0.0.1 never get added to db
# Delete peers
bootstrapper_db.execute("DELETE FROM peer WHERE ip4 = '1.2.3.4'")
assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer_to_hash").fetchone()["num"] == 0
assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM hash").fetchone()["num"] == 3 # 3 sites
assert bootstrapper_db.execute("SELECT COUNT(*) AS num FROM peer").fetchone()["num"] == 0 # 0 peer
def testPassive(self, file_server, bootstrapper_db):
peer = Peer("127.0.0.1", 1544, connection_server=file_server)
hash1 = hashlib.sha256("hash1").digest()
bootstrapper_db.peerAnnounce(ip4=None, port=15441, hashes=[hash1])
res = peer.request("announce", {
"hashes": [hash1], "port": 15441, "need_types": ["ip4"], "need_num": 10, "add": []
})
assert len(res["peers"][0]["ip4"]) == 0 # Empty result
def testAddOnion(self, file_server, site, bootstrapper_db, tor_manager):
onion1 = tor_manager.addOnion()
onion2 = tor_manager.addOnion()
peer = Peer("127.0.0.1", 1544, connection_server=file_server)
hash1 = hashlib.sha256("site1").digest()
hash2 = hashlib.sha256("site2").digest()
hash3 = hashlib.sha256("site3").digest()
bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=1234, hashes=[hash1, hash2, hash3])
res = peer.request("announce", {
"onions": [onion1, onion1, onion2],
"hashes": [hash1, hash2, hash3], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
})
assert len(res["peers"][0]["ip4"]) == 1
# Onion address not added yet
site_peers = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
assert len(site_peers["onion"]) == 0
assert "onion_sign_this" in res
# Sign the nonces
sign1 = CryptRsa.sign(res["onion_sign_this"], tor_manager.getPrivatekey(onion1))
sign2 = CryptRsa.sign(res["onion_sign_this"], tor_manager.getPrivatekey(onion2))
# Bad sign (different address)
res = peer.request("announce", {
"onions": [onion1], "onion_sign_this": res["onion_sign_this"],
"onion_signs": {tor_manager.getPublickey(onion2): sign2},
"hashes": [hash1], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
})
assert "onion_sign_this" in res
site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
assert len(site_peers1["onion"]) == 0 # Not added
# Bad sign (missing one)
res = peer.request("announce", {
"onions": [onion1, onion1, onion2], "onion_sign_this": res["onion_sign_this"],
"onion_signs": {tor_manager.getPublickey(onion1): sign1},
"hashes": [hash1, hash2, hash3], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
})
assert "onion_sign_this" in res
site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
assert len(site_peers1["onion"]) == 0 # Not added
# Good sign
res = peer.request("announce", {
"onions": [onion1, onion1, onion2], "onion_sign_this": res["onion_sign_this"],
"onion_signs": {tor_manager.getPublickey(onion1): sign1, tor_manager.getPublickey(onion2): sign2},
"hashes": [hash1, hash2, hash3], "port": 15441, "need_types": ["ip4", "onion"], "need_num": 10, "add": ["onion"]
})
assert "onion_sign_this" not in res
# Onion addresses added
site_peers1 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash1)
assert len(site_peers1["onion"]) == 1
site_peers2 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash2)
assert len(site_peers2["onion"]) == 1
site_peers3 = bootstrapper_db.peerList(ip4="1.2.3.4", port=1234, hash=hash3)
assert len(site_peers3["onion"]) == 1
assert site_peers1["onion"][0] == site_peers2["onion"][0]
assert site_peers2["onion"][0] != site_peers3["onion"][0]
assert helper.unpackOnionAddress(site_peers1["onion"][0])[0] == onion1+".onion"
assert helper.unpackOnionAddress(site_peers2["onion"][0])[0] == onion1+".onion"
assert helper.unpackOnionAddress(site_peers3["onion"][0])[0] == onion2+".onion"
tor_manager.delOnion(onion1)
tor_manager.delOnion(onion2)
def testRequestPeers(self, file_server, site, bootstrapper_db, tor_manager):
site.connection_server = file_server
site.connection_server.tor_manager = tor_manager
hash = hashlib.sha256(site.address).digest()
# Request peers from tracker
assert len(site.peers) == 0
bootstrapper_db.peerAnnounce(ip4="1.2.3.4", port=1234, hashes=[hash])
site.announceTracker("zero", "127.0.0.1:1544")
assert len(site.peers) == 1
# Test onion address store
bootstrapper_db.peerAnnounce(onion="bka4ht2bzxchy44r", port=1234, hashes=[hash], onion_signed=True)
site.announceTracker("zero", "127.0.0.1:1544")
assert len(site.peers) == 2
assert "bka4ht2bzxchy44r.onion:1234" in site.peers
|
wfxiang08/django178 | refs/heads/master | django/__init__.py | 1 | # -*- coding:utf-8 -*-
VERSION = (1, 7, 8, 'final', 0)
def get_version(*args, **kwargs):
# Don't litter django/__init__.py with all the get_version stuff.
# Only import if it's actually called.
from django.utils.version import get_version
return get_version(*args, **kwargs)
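# For the VERSION tuple above, get_version() renders "1.7.8"
# (a 'final' status adds no suffix).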
def setup():
"""
Configure the settings (this happens as a side effect of accessing the
first setting), configure logging and populate the app registry.
"""
from django.apps import apps
from django.conf import settings
from django.utils.log import configure_logging
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
apps.populate(settings.INSTALLED_APPS)
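# Typical standalone usage (illustrative): set DJANGO_SETTINGS_MODULE,
# then call django.setup() before importing any models.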
_gevent = None
def using_gevent():
global _gevent
if _gevent is None:
from django.conf import settings
_gevent = settings.USING_GEVENT
return _gevent |
renegelinas/mi-instrument | refs/heads/master | mi/instrument/nobska/mavs4/playback/__init__.py | 59 | __author__ = 'petercable'
|
tsufiev/horizon | refs/heads/master | openstack_dashboard/dashboards/project/networks/workflows.py | 5 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import netaddr
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets import utils
LOG = logging.getLogger(__name__)
class CreateNetworkInfoAction(workflows.Action):
net_name = forms.CharField(max_length=255,
label=_("Network Name"),
required=False)
if api.neutron.is_port_profiles_supported():
widget = None
else:
widget = forms.HiddenInput()
net_profile_id = forms.ChoiceField(label=_("Network Profile"),
required=False,
widget=widget)
admin_state = forms.ChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"),
help_text=_("The state to start"
" the network in."))
def __init__(self, request, *args, **kwargs):
super(CreateNetworkInfoAction, self).__init__(request,
*args, **kwargs)
if api.neutron.is_port_profiles_supported():
self.fields['net_profile_id'].choices = (
self.get_network_profile_choices(request))
def get_network_profile_choices(self, request):
profile_choices = [('', _("Select a profile"))]
for profile in self._get_profiles(request, 'network'):
profile_choices.append((profile.id, profile.name))
return profile_choices
def _get_profiles(self, request, type_p):
profiles = []
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
return profiles
# TODO(absubram): Add ability to view network profile information
# in the network detail if a profile is used.
class Meta(object):
name = _("Network")
help_text = _("Create a new network. "
"In addition, a subnet associated with the network "
"can be created in the next panel.")
class CreateNetworkInfo(workflows.Step):
action_class = CreateNetworkInfoAction
contributes = ("net_name", "admin_state", "net_profile_id")
class CreateSubnetInfoAction(workflows.Action):
with_subnet = forms.BooleanField(label=_("Create Subnet"),
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-slug': 'with_subnet',
'data-hide-tab': 'create_network__'
'createsubnetdetail'
'action',
'data-hide-on-checked': 'false'
}),
initial=True,
required=False)
subnet_name = forms.CharField(max_length=255,
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'with_subnet',
}),
label=_("Subnet Name"),
required=False)
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'with_subnet',
'data-is-required': 'true'
}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24, 2001:DB8::/48)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'ipversion',
'data-switch-on': 'with_subnet'
}),
label=_("IP Version"))
gateway_ip = forms.IPField(
label=_("Gateway IP"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'with_subnet gateway_ip'
}),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254) "
"The default value is the first IP of the "
"network address "
"(e.g. 192.168.0.1 for 192.168.0.0/24, "
"2001:DB8::1 for 2001:DB8::/48). "
"If you use the default, leave blank. "
"If you do not want to use a gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
widget=forms.CheckboxInput(attrs={
'class': 'switched switchable',
'data-slug': 'gateway_ip',
'data-switch-on': 'with_subnet',
'data-hide-on-checked': 'true'
}),
initial=False,
required=False)
msg = _('Specify "Network Address" or '
'clear "Create Subnet" checkbox.')
class Meta(object):
name = _("Subnet")
help_text = _('Create a subnet associated with the new network, '
'in which case "Network Address" must be specified. '
'If you wish to create a network without a subnet, '
'uncheck the "Create Subnet" checkbox.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetInfoAction, self).__init__(request, context, *args,
**kwargs)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ip_version'].widget = forms.HiddenInput()
self.fields['ip_version'].initial = 4
def _check_subnet_data(self, cleaned_data, is_create=True):
cidr = cleaned_data.get('cidr')
ip_version = int(cleaned_data.get('ip_version'))
gateway_ip = cleaned_data.get('gateway_ip')
no_gateway = cleaned_data.get('no_gateway')
if not cidr:
raise forms.ValidationError(self.msg)
if cidr:
subnet = netaddr.IPNetwork(cidr)
if subnet.version != ip_version:
msg = _('Network Address and IP version are inconsistent.')
raise forms.ValidationError(msg)
if (ip_version == 4 and subnet.prefixlen == 32) or \
(ip_version == 6 and subnet.prefixlen == 128):
msg = _("The subnet in the Network Address is "
"too small (/%s).") % subnet.prefixlen
raise forms.ValidationError(msg)
if not no_gateway and gateway_ip:
if netaddr.IPAddress(gateway_ip).version != ip_version:
msg = _('Gateway IP and IP version are inconsistent.')
raise forms.ValidationError(msg)
if not is_create and not no_gateway and not gateway_ip:
msg = _('Specify IP address of gateway or '
'check "Disable Gateway".')
raise forms.ValidationError(msg)
def clean(self):
cleaned_data = super(CreateSubnetInfoAction, self).clean()
with_subnet = cleaned_data.get('with_subnet')
if not with_subnet:
return cleaned_data
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(workflows.Step):
action_class = CreateSubnetInfoAction
contributes = ("with_subnet", "subnet_name", "cidr",
"ip_version", "gateway_ip", "no_gateway")
class CreateSubnetDetailAction(workflows.Action):
enable_dhcp = forms.BooleanField(label=_("Enable DHCP"),
initial=True, required=False)
ipv6_modes = forms.ChoiceField(
label=_("IPv6 Address Configuration Mode"),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'ipversion',
'data-ipversion-6': _("IPv6 Address Configuration Mode"),
}),
initial=utils.IPV6_DEFAULT_MODE,
required=False,
help_text=_("Specifies how IPv6 addresses and additional information "
"are configured. We can specify SLAAC/DHCPv6 stateful/"
"DHCPv6 stateless provided by OpenStack, "
"or specify no option. "
"'No options specified' means addresses are configured "
"manually or configured by a non-OpenStack system."))
allocation_pools = forms.CharField(
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Allocation Pools"),
help_text=_("IP address allocation pools. Each entry is: "
"start_ip_address,end_ip_address "
"(e.g., 192.168.1.100,192.168.1.120) "
"and one entry per line."),
required=False)
dns_nameservers = forms.CharField(
widget=forms.widgets.Textarea(attrs={'rows': 4}),
label=_("DNS Name Servers"),
help_text=_("IP address list of DNS name servers for this subnet. "
"One entry per line."),
required=False)
host_routes = forms.CharField(
widget=forms.widgets.Textarea(attrs={'rows': 4}),
label=_("Host Routes"),
help_text=_("Additional routes announced to the hosts. "
"Each entry is: destination_cidr,nexthop "
"(e.g., 192.168.200.0/24,10.56.1.254) "
"and one entry per line."),
required=False)
class Meta(object):
name = _("Subnet Details")
help_text = _('Specify additional attributes for the subnet.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetDetailAction, self).__init__(request, context,
*args, **kwargs)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ipv6_modes'].widget = forms.HiddenInput()
def populate_ipv6_modes_choices(self, request, context):
return [(value, _("%s (Default)") % label)
if value == utils.IPV6_DEFAULT_MODE
else (value, label)
for value, label in utils.IPV6_MODE_CHOICES]
def _convert_ip_address(self, ip, field_name):
try:
return netaddr.IPAddress(ip)
except (netaddr.AddrFormatError, ValueError):
msg = (_('%(field_name)s: Invalid IP address (value=%(ip)s)')
% {'field_name': field_name, 'ip': ip})
raise forms.ValidationError(msg)
def _convert_ip_network(self, network, field_name):
try:
return netaddr.IPNetwork(network)
except (netaddr.AddrFormatError, ValueError):
msg = (_('%(field_name)s: Invalid IP address (value=%(network)s)')
% {'field_name': field_name, 'network': network})
raise forms.ValidationError(msg)
def _check_allocation_pools(self, allocation_pools):
for p in allocation_pools.split('\n'):
p = p.strip()
if not p:
continue
pool = p.split(',')
if len(pool) != 2:
msg = _('Start and end addresses must be specified '
'(value=%s)') % p
raise forms.ValidationError(msg)
start, end = [self._convert_ip_address(ip, "allocation_pools")
for ip in pool]
if start > end:
msg = _('Start address is larger than end address '
'(value=%s)') % p
raise forms.ValidationError(msg)
def _check_dns_nameservers(self, dns_nameservers):
for ns in dns_nameservers.split('\n'):
ns = ns.strip()
if not ns:
continue
self._convert_ip_address(ns, "dns_nameservers")
def _check_host_routes(self, host_routes):
for r in host_routes.split('\n'):
r = r.strip()
if not r:
continue
route = r.split(',')
if len(route) != 2:
msg = _('Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)') % r
raise forms.ValidationError(msg)
self._convert_ip_network(route[0], "host_routes")
self._convert_ip_address(route[1], "host_routes")
def clean(self):
cleaned_data = super(CreateSubnetDetailAction, self).clean()
self._check_allocation_pools(cleaned_data.get('allocation_pools'))
self._check_host_routes(cleaned_data.get('host_routes'))
self._check_dns_nameservers(cleaned_data.get('dns_nameservers'))
return cleaned_data
class CreateSubnetDetail(workflows.Step):
action_class = CreateSubnetDetailAction
contributes = ("enable_dhcp", "ipv6_modes", "allocation_pools",
"dns_nameservers", "host_routes")
class CreateNetwork(workflows.Workflow):
slug = "create_network"
name = _("Create Network")
finalize_button_name = _("Create")
success_message = _('Created network "%s".')
failure_message = _('Unable to create network "%s".')
default_steps = (CreateNetworkInfo,
CreateSubnetInfo,
CreateSubnetDetail)
wizard = True
def get_success_url(self):
return reverse("horizon:project:networks:index")
def get_failure_url(self):
return reverse("horizon:project:networks:index")
def format_status_message(self, message):
name = self.context.get('net_name') or self.context.get('net_id', '')
return message % name
def _create_network(self, request, data):
try:
params = {'name': data['net_name'],
'admin_state_up': (data['admin_state'] == 'True')}
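# admin_state arrives from the ChoiceField as the string 'True' or
# 'False', hence the string comparison above.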
if api.neutron.is_port_profiles_supported():
params['net_profile_id'] = data['net_profile_id']
network = api.neutron.network_create(request, **params)
self.context['net_id'] = network.id
msg = (_('Network "%s" was successfully created.') %
network.name_or_id)
LOG.debug(msg)
return network
except Exception as e:
msg = (_('Failed to create network "%(network)s": %(reason)s') %
{"network": data['net_name'], "reason": e})
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
return False
def _setup_subnet_parameters(self, params, data, is_create=True):
"""Setup subnet parameters
This methods setups subnet parameters which are available
in both create and update.
"""
is_update = not is_create
params['enable_dhcp'] = data['enable_dhcp']
if int(data['ip_version']) == 6:
ipv6_modes = utils.get_ipv6_modes_attrs_from_menu(
data['ipv6_modes'])
if ipv6_modes[0] or is_update:
params['ipv6_ra_mode'] = ipv6_modes[0]
if ipv6_modes[1] or is_update:
params['ipv6_address_mode'] = ipv6_modes[1]
if is_create and data['allocation_pools']:
pools = [dict(zip(['start', 'end'], pool.strip().split(',')))
for pool in data['allocation_pools'].split('\n')
if pool.strip()]
params['allocation_pools'] = pools
if data['host_routes'] or is_update:
routes = [dict(zip(['destination', 'nexthop'],
route.strip().split(',')))
for route in data['host_routes'].split('\n')
if route.strip()]
params['host_routes'] = routes
if data['dns_nameservers'] or is_update:
nameservers = [ns.strip()
for ns in data['dns_nameservers'].split('\n')
if ns.strip()]
params['dns_nameservers'] = nameservers
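# For instance (illustrative values), an allocation_pools textarea of
#   192.168.1.100,192.168.1.120
#   192.168.1.200,192.168.1.220
# is parsed above into params['allocation_pools'] ==
#   [{'start': '192.168.1.100', 'end': '192.168.1.120'},
#    {'start': '192.168.1.200', 'end': '192.168.1.220'}]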
def _create_subnet(self, request, data, network=None, tenant_id=None,
no_redirect=False):
if network:
network_id = network.id
network_name = network.name
else:
network_id = self.context.get('network_id')
network_name = self.context.get('network_name')
try:
params = {'network_id': network_id,
'name': data['subnet_name'],
'cidr': data['cidr'],
'ip_version': int(data['ip_version'])}
if tenant_id:
params['tenant_id'] = tenant_id
if data['no_gateway']:
params['gateway_ip'] = None
elif data['gateway_ip']:
params['gateway_ip'] = data['gateway_ip']
self._setup_subnet_parameters(params, data)
subnet = api.neutron.subnet_create(request, **params)
self.context['subnet_id'] = subnet.id
msg = _('Subnet "%s" was successfully created.') % data['cidr']
LOG.debug(msg)
return subnet
except Exception as e:
msg = _('Failed to create subnet "%(sub)s" for network "%(net)s": '
' %(reason)s')
if no_redirect:
redirect = None
else:
redirect = self.get_failure_url()
exceptions.handle(request,
msg % {"sub": data['cidr'], "net": network_name,
"reason": e},
redirect=redirect)
return False
def _delete_network(self, request, network):
"""Delete the created network when subnet creation failed."""
try:
api.neutron.network_delete(request, network.id)
msg = _('Delete the created network "%s" '
'due to subnet creation failure.') % network.name
LOG.debug(msg)
redirect = self.get_failure_url()
messages.info(request, msg)
raise exceptions.Http302(redirect)
except Exception:
msg = _('Failed to delete network "%s"') % network.name
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
def handle(self, request, data):
network = self._create_network(request, data)
if not network:
return False
# If we do not need to create a subnet, return here.
if not data['with_subnet']:
return True
subnet = self._create_subnet(request, data, network, no_redirect=True)
if subnet:
return True
else:
self._delete_network(request, network)
return False
|
fernandezcuesta/ansible | refs/heads/devel | lib/ansible/modules/cloud/centurylink/clc_server.py | 8 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_server
short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
description:
- An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
version_added: "2.0"
options:
additional_disks:
description:
- The list of additional disks for the server
required: False
default: []
add_public_ip:
description:
- Whether to add a public ip to the server
required: False
default: False
choices: [False, True]
alias:
description:
- The account alias to provision the servers under.
required: False
default: None
anti_affinity_policy_id:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
required: False
default: None
anti_affinity_policy_name:
description:
- The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
required: False
default: None
alert_policy_id:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
required: False
default: None
alert_policy_name:
description:
- The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
required: False
default: None
count:
description:
- The number of servers to build (mutually exclusive with exact_count)
required: False
default: 1
count_group:
description:
- Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
required: False
default: None
cpu:
description:
- How many CPUs to provision on the server
default: 1
required: False
cpu_autoscale_policy_id:
description:
- The autoscale policy to assign to the server.
default: None
required: False
custom_fields:
description:
- The list of custom fields to set on the server.
default: []
required: False
description:
description:
- The description to set for the server.
default: None
required: False
exact_count:
description:
- Run in idempotent mode. Will ensure that this exact number of servers is running in the provided group,
creating and deleting them to reach that count. Requires count_group to be set.
default: None
required: False
group:
description:
- The Server Group to create servers under.
default: 'Default Group'
required: False
ip_address:
description:
- The IP Address for the server. One is assigned if not provided.
default: None
required: False
location:
description:
- The Datacenter to create servers in.
default: None
required: False
managed_os:
description:
- Whether to create the server as 'Managed' or not.
default: False
required: False
choices: [True, False]
memory:
description:
- Memory in GB.
default: 1
required: False
name:
description:
- A 1 to 6 character identifier to use for the server. This is required when state is 'present'
default: None
required: False
network_id:
description:
- The network UUID on which to create servers.
default: None
required: False
packages:
description:
- The list of blue print packages to run on the server after its created.
default: []
required: False
password:
description:
- Password for the administrator / root user
default: None
required: False
primary_dns:
description:
- Primary DNS used by the server.
default: None
required: False
public_ip_protocol:
description:
- The protocol to use for the public ip if add_public_ip is set to True.
default: 'TCP'
choices: ['TCP', 'UDP', 'ICMP']
required: False
public_ip_ports:
description:
- A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
default: []
required: False
secondary_dns:
description:
- Secondary DNS used by the server.
default: None
required: False
server_ids:
description:
- Required for started, stopped, and absent states.
A list of server Ids to ensure are started, stopped, or absent.
default: []
required: False
source_server_password:
description:
- The password for the source server if a clone is specified.
default: None
required: False
state:
description:
- The state to ensure that the provided resources are in.
default: 'present'
required: False
choices: ['present', 'absent', 'started', 'stopped']
storage_type:
description:
- The type of storage to attach to the server.
default: 'standard'
required: False
choices: ['standard', 'hyperscale']
template:
description:
- The template to use for server creation. Will search for a template if a partial string is provided.
This is required when state is 'present'
default: None
required: False
ttl:
description:
- The time to live for the server in seconds. The server will be deleted when this time expires.
default: None
required: False
type:
description:
- The type of server to create.
default: 'standard'
required: False
choices: ['standard', 'hyperscale', 'bareMetal']
configuration_id:
description:
- Only required for bare metal servers.
Specifies the identifier for the specific configuration type of bare metal server to deploy.
default: None
required: False
os_type:
description:
- Only required for bare metal servers.
Specifies the OS to provision with the bare metal server.
default: None
required: False
choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Provision a single Ubuntu Server
clc_server:
name: test
template: ubuntu-14-64
count: 1
group: Default Group
state: present
- name: Ensure 'Default Group' has exactly 5 servers
clc_server:
name: test
template: ubuntu-14-64
exact_count: 5
count_group: Default Group
group: Default Group
- name: Stop a Server
clc_server:
server_ids:
- UC1ACCT-TEST01
state: stopped
- name: Start a Server
clc_server:
server_ids:
- UC1ACCT-TEST01
state: started
- name: Delete a Server
clc_server:
server_ids:
- UC1ACCT-TEST01
state: absent
'''
RETURN = '''
server_ids:
description: The list of server ids that are created
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
partially_created_server_ids:
description: The list of server ids that are partially created
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects returned from CLC
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
import json
import os
import time
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcServer:
clc = clc_sdk
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.group_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
changed = False
new_server_ids = []
server_dict_array = []
self._set_clc_credentials_from_env()
self.module.params = self._validate_module_params(
self.clc,
self.module)
p = self.module.params
state = p.get('state')
#
# Handle each state
#
partial_servers_ids = []
if state == 'absent':
server_ids = p['server_ids']
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of instances to delete: %s' %
server_ids)
(changed,
server_dict_array,
new_server_ids) = self._delete_servers(module=self.module,
clc=self.clc,
server_ids=server_ids)
elif state in ('started', 'stopped'):
server_ids = p.get('server_ids')
if not isinstance(server_ids, list):
return self.module.fail_json(
msg='server_ids needs to be a list of servers to run: %s' %
server_ids)
(changed,
server_dict_array,
new_server_ids) = self._start_stop_servers(self.module,
self.clc,
server_ids)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not p.get('template') and p.get('type') != 'bareMetal':
return self.module.fail_json(
msg='template parameter is required for new instance')
if p.get('exact_count') is None:
(server_dict_array,
new_server_ids,
partial_servers_ids,
changed) = self._create_servers(self.module,
self.clc)
else:
(server_dict_array,
new_server_ids,
partial_servers_ids,
changed) = self._enforce_count(self.module,
self.clc)
self.module.exit_json(
changed=changed,
server_ids=new_server_ids,
partially_created_server_ids=partial_servers_ids,
servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
name=dict(),
template=dict(),
group=dict(default='Default Group'),
network_id=dict(),
location=dict(default=None),
cpu=dict(default=1),
memory=dict(default=1),
alias=dict(default=None),
password=dict(default=None, no_log=True),
ip_address=dict(default=None),
storage_type=dict(
default='standard',
choices=[
'standard',
'hyperscale']),
type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
primary_dns=dict(default=None),
secondary_dns=dict(default=None),
additional_disks=dict(type='list', default=[]),
custom_fields=dict(type='list', default=[]),
ttl=dict(default=None),
managed_os=dict(type='bool', default=False),
description=dict(default=None),
source_server_password=dict(default=None, no_log=True),
cpu_autoscale_policy_id=dict(default=None),
anti_affinity_policy_id=dict(default=None),
anti_affinity_policy_name=dict(default=None),
alert_policy_id=dict(default=None),
alert_policy_name=dict(default=None),
packages=dict(type='list', default=[]),
state=dict(
default='present',
choices=[
'present',
'absent',
'started',
'stopped']),
count=dict(type='int', default=1),
exact_count=dict(type='int', default=None),
count_group=dict(),
server_ids=dict(type='list', default=[]),
add_public_ip=dict(type='bool', default=False),
public_ip_protocol=dict(
default='TCP',
choices=[
'TCP',
'UDP',
'ICMP']),
public_ip_ports=dict(type='list', default=[]),
configuration_id=dict(default=None),
os_type=dict(default=None,
choices=[
'redHat6_64Bit',
'centOS6_64Bit',
'windows2012R2Standard_64Bit',
'ubuntu14_64Bit'
]),
wait=dict(type='bool', default=True))
mutually_exclusive = [
['exact_count', 'count'],
['exact_count', 'state'],
['anti_affinity_policy_id', 'anti_affinity_policy_name'],
['alert_policy_id', 'alert_policy_name'],
]
return {"argument_spec": argument_spec,
"mutually_exclusive": mutually_exclusive}
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
@staticmethod
def _validate_module_params(clc, module):
"""
Validate the module params, and lookup default values.
:param clc: clc-sdk instance to use
:param module: module to validate
:return: dictionary of validated params
"""
params = module.params
datacenter = ClcServer._find_datacenter(clc, module)
ClcServer._validate_types(module)
ClcServer._validate_name(module)
params['alias'] = ClcServer._find_alias(clc, module)
params['cpu'] = ClcServer._find_cpu(clc, module)
params['memory'] = ClcServer._find_memory(clc, module)
params['description'] = ClcServer._find_description(module)
params['ttl'] = ClcServer._find_ttl(clc, module)
params['template'] = ClcServer._find_template_id(module, datacenter)
params['group'] = ClcServer._find_group(module, datacenter).id
params['network_id'] = ClcServer._find_network_id(module, datacenter)
params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
clc,
module)
params['alert_policy_id'] = ClcServer._find_alert_policy_id(
clc,
module)
return params
@staticmethod
def _find_datacenter(clc, module):
"""
Find the datacenter by calling the CLC API.
:param clc: clc-sdk instance to use
:param module: module to validate
:return: clc-sdk.Datacenter instance
"""
location = module.params.get('location')
try:
if not location:
account = clc.v2.Account()
location = account.data.get('primaryDataCenter')
data_center = clc.v2.Datacenter(location)
return data_center
except CLCException:
module.fail_json(msg="Unable to find location: {0}".format(location))
@staticmethod
def _find_alias(clc, module):
"""
Find or Validate the Account Alias by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: clc-sdk.Account instance
"""
alias = module.params.get('alias')
if not alias:
try:
alias = clc.v2.Account.GetAlias()
except CLCException as ex:
module.fail_json(msg='Unable to find account alias. {0}'.format(
ex.message
))
return alias
@staticmethod
def _find_cpu(clc, module):
"""
Find or validate the CPU value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for CPU
"""
cpu = module.params.get('cpu')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not cpu and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("cpu"):
cpu = group.Defaults("cpu")
else:
module.fail_json(
msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
return cpu
@staticmethod
def _find_memory(clc, module):
"""
Find or validate the Memory value by calling the CLC API
:param clc: clc-sdk instance to use
:param module: module to validate
:return: Int value for Memory
"""
memory = module.params.get('memory')
group_id = module.params.get('group_id')
alias = module.params.get('alias')
state = module.params.get('state')
if not memory and state == 'present':
group = clc.v2.Group(id=group_id,
alias=alias)
if group.Defaults("memory"):
memory = group.Defaults("memory")
else:
module.fail_json(msg=str(
"Can\'t determine a default memory value. Please provide a value for memory."))
return memory
@staticmethod
def _find_description(module):
"""
Set the description module param to name if description is blank
:param module: the module to validate
:return: string description
"""
description = module.params.get('description')
if not description:
description = module.params.get('name')
return description
@staticmethod
def _validate_types(module):
"""
Validate that type and storage_type are set appropriately, and fail if not
:param module: the module to validate
:return: none
"""
state = module.params.get('state')
server_type = module.params.get(
'type').lower() if module.params.get('type') else None
storage_type = module.params.get(
'storage_type').lower() if module.params.get('storage_type') else None
if state == "present":
if server_type == "standard" and storage_type not in (
"standard", "premium"):
module.fail_json(
msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
if server_type == "hyperscale" and storage_type != "hyperscale":
module.fail_json(
msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
@staticmethod
def _validate_name(module):
"""
Validate that name is the correct length if provided, fail if it's not
:param module: the module to validate
:return: none
"""
server_name = module.params.get('name')
state = module.params.get('state')
if state == 'present' and (
len(server_name) < 1 or len(server_name) > 6):
module.fail_json(msg=str(
"When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
@staticmethod
def _find_ttl(clc, module):
"""
Validate that TTL is > 3600 if set, and fail if not
:param clc: clc-sdk instance to use
:param module: module to validate
:return: validated ttl
"""
ttl = module.params.get('ttl')
if ttl:
if ttl <= 3600:
return module.fail_json(msg=str("Ttl cannot be <= 3600"))
else:
ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
return ttl
@staticmethod
def _find_template_id(module, datacenter):
"""
Find the template id by calling the CLC API.
:param module: the module to validate
:param datacenter: the datacenter to search for the template
:return: a valid clc template id
"""
lookup_template = module.params.get('template')
state = module.params.get('state')
type = module.params.get('type')
result = None
if state == 'present' and type != 'bareMetal':
try:
result = datacenter.Templates().Search(lookup_template)[0].id
except CLCException:
module.fail_json(
msg=str(
"Unable to find a template: " +
lookup_template +
" in location: " +
datacenter.id))
return result
@staticmethod
def _find_network_id(module, datacenter):
"""
Validate the provided network id or return a default.
:param module: the module to validate
:param datacenter: the datacenter to search for a network id
:return: a valid network id
"""
network_id = module.params.get('network_id')
if not network_id:
try:
network_id = datacenter.Networks().networks[0].id
# -- added for clc-sdk 2.23 compatibility
# datacenter_networks = clc_sdk.v2.Networks(
# networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
# network_id = datacenter_networks.networks[0].id
# -- end
except CLCException:
module.fail_json(
msg=str(
"Unable to find a network in location: " +
datacenter.id))
return network_id
@staticmethod
def _find_aa_policy_id(clc, module):
"""
        Validate that the anti affinity policy exists for the given name and fail if not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: aa_policy_id: the anti affinity policy id of the given name.
"""
aa_policy_id = module.params.get('anti_affinity_policy_id')
aa_policy_name = module.params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
alias = module.params.get('alias')
aa_policy_id = ClcServer._get_anti_affinity_policy_id(
clc,
module,
alias,
aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _find_alert_policy_id(clc, module):
"""
        Validate that the alert policy exists for the given name and fail if not
:param clc: the clc-sdk instance
:param module: the module to validate
:return: alert_policy_id: the alert policy id of the given name.
"""
alert_policy_id = module.params.get('alert_policy_id')
alert_policy_name = module.params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alias = module.params.get('alias')
alert_policy_id = ClcServer._get_alert_policy_id_by_name(
clc=clc,
module=module,
alias=alias,
alert_policy_name=alert_policy_name
)
if not alert_policy_id:
module.fail_json(
                    msg='No alert policy exists with name : %s' % alert_policy_name)
return alert_policy_id
def _create_servers(self, module, clc, override_count=None):
"""
Create New Servers in CLC cloud
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:return: a list of dictionaries with server information about the servers that were created
"""
p = module.params
request_list = []
servers = []
server_dict_array = []
created_server_ids = []
partial_created_servers_ids = []
add_public_ip = p.get('add_public_ip')
public_ip_protocol = p.get('public_ip_protocol')
public_ip_ports = p.get('public_ip_ports')
params = {
'name': p.get('name'),
'template': p.get('template'),
'group_id': p.get('group'),
'network_id': p.get('network_id'),
'cpu': p.get('cpu'),
'memory': p.get('memory'),
'alias': p.get('alias'),
'password': p.get('password'),
'ip_address': p.get('ip_address'),
'storage_type': p.get('storage_type'),
'type': p.get('type'),
'primary_dns': p.get('primary_dns'),
'secondary_dns': p.get('secondary_dns'),
'additional_disks': p.get('additional_disks'),
'custom_fields': p.get('custom_fields'),
'ttl': p.get('ttl'),
'managed_os': p.get('managed_os'),
'description': p.get('description'),
'source_server_password': p.get('source_server_password'),
'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
'packages': p.get('packages'),
'configuration_id': p.get('configuration_id'),
'os_type': p.get('os_type')
}
count = override_count if override_count else p.get('count')
        changed = count != 0
if not changed:
return server_dict_array, created_server_ids, partial_created_servers_ids, changed
for i in range(0, count):
if not module.check_mode:
req = self._create_clc_server(clc=clc,
module=module,
server_params=params)
server = req.requests[0].Server()
request_list.append(req)
servers.append(server)
self._wait_for_requests(module, request_list)
self._refresh_servers(module, servers)
ip_failed_servers = self._add_public_ip_to_servers(
module=module,
should_add_public_ip=add_public_ip,
servers=servers,
public_ip_protocol=public_ip_protocol,
public_ip_ports=public_ip_ports)
ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
module=module,
servers=servers)
for server in servers:
if server in ip_failed_servers or server in ap_failed_servers:
partial_created_servers_ids.append(server.id)
else:
# reload server details
server = clc.v2.Server(server.id)
server.data['ipaddress'] = server.details[
'ipAddresses'][0]['internal']
if add_public_ip and len(server.PublicIPs().public_ips) > 0:
server.data['publicip'] = str(
server.PublicIPs().public_ips[0])
created_server_ids.append(server.id)
server_dict_array.append(server.data)
return server_dict_array, created_server_ids, partial_created_servers_ids, changed
def _enforce_count(self, module, clc):
"""
Enforce that there is the right number of servers in the provided group.
Starts or stops servers as necessary.
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:return: a list of dictionaries with server information about the servers that were created or deleted
"""
p = module.params
changed = False
count_group = p.get('count_group')
datacenter = ClcServer._find_datacenter(clc, module)
exact_count = p.get('exact_count')
server_dict_array = []
partial_servers_ids = []
changed_server_ids = []
# fail here if the exact count was specified without filtering
        # on a group, as this may lead to an undesired removal of instances
if exact_count and count_group is None:
return module.fail_json(
msg="you must use the 'count_group' option with exact_count")
servers, running_servers = ClcServer._find_running_servers_by_group(
module, datacenter, count_group)
if len(running_servers) == exact_count:
changed = False
elif len(running_servers) < exact_count:
to_create = exact_count - len(running_servers)
server_dict_array, changed_server_ids, partial_servers_ids, changed \
= self._create_servers(module, clc, override_count=to_create)
for server in server_dict_array:
running_servers.append(server)
elif len(running_servers) > exact_count:
to_remove = len(running_servers) - exact_count
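            # Sort the ids and trim from the front, so the servers with the
            # smallest ids are removed first.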
all_server_ids = sorted([x.id for x in running_servers])
remove_ids = all_server_ids[0:to_remove]
(changed, server_dict_array, changed_server_ids) \
= ClcServer._delete_servers(module, clc, remove_ids)
return server_dict_array, changed_server_ids, partial_servers_ids, changed
@staticmethod
def _wait_for_requests(module, request_list):
"""
Block until server provisioning requests are completed.
:param module: the AnsibleModule object
:param request_list: a list of clc-sdk.Request instances
:return: none
"""
wait = module.params.get('wait')
if wait:
# Requests.WaitUntilComplete() returns the count of failed requests
failed_requests_count = sum(
[request.WaitUntilComplete() for request in request_list])
if failed_requests_count > 0:
module.fail_json(
msg='Unable to process server request')
@staticmethod
def _refresh_servers(module, servers):
"""
Loop through a list of servers and refresh them.
:param module: the AnsibleModule object
:param servers: list of clc-sdk.Server instances to refresh
:return: none
"""
for server in servers:
try:
server.Refresh()
except CLCException as ex:
module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
server.id, ex.message
))
@staticmethod
def _add_public_ip_to_servers(
module,
should_add_public_ip,
servers,
public_ip_protocol,
public_ip_ports):
"""
Create a public IP for servers
:param module: the AnsibleModule object
:param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
:param servers: List of servers to add public ips to
:param public_ip_protocol: a protocol to allow for the public ips
:param public_ip_ports: list of ports to allow for the public ips
:return: none
"""
failed_servers = []
if not should_add_public_ip:
return failed_servers
ports_lst = []
request_list = []
server = None
for port in public_ip_ports:
ports_lst.append(
{'protocol': public_ip_protocol, 'port': port})
try:
if not module.check_mode:
for server in servers:
request = server.PublicIPs().Add(ports_lst)
request_list.append(request)
except APIFailedResponse:
failed_servers.append(server)
ClcServer._wait_for_requests(module, request_list)
return failed_servers
@staticmethod
def _add_alert_policy_to_servers(clc, module, servers):
"""
Associate the alert policy to servers
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param servers: List of servers to add alert policy to
:return: failed_servers: the list of servers which failed while associating alert policy
"""
failed_servers = []
p = module.params
alert_policy_id = p.get('alert_policy_id')
alias = p.get('alias')
if alert_policy_id and not module.check_mode:
for server in servers:
try:
ClcServer._add_alert_policy_to_server(
clc=clc,
alias=alias,
server_id=server.id,
alert_policy_id=alert_policy_id)
except CLCException:
failed_servers.append(server)
return failed_servers
@staticmethod
def _add_alert_policy_to_server(
clc, alias, server_id, alert_policy_id):
"""
Associate an alert policy to a clc server
:param clc: the clc-sdk instance to use
:param alias: the clc account alias
:param server_id: The clc server id
:param alert_policy_id: the alert policy id to be associated to the server
:return: none
"""
try:
clc.v2.API.Call(
method='POST',
url='servers/%s/%s/alertPolicies' % (alias, server_id),
payload=json.dumps(
{
'id': alert_policy_id
}))
except APIFailedResponse as e:
raise CLCException(
'Failed to associate alert policy to the server : {0} with Error {1}'.format(
server_id, str(e.response_text)))
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
Returns the alert policy id for the given alert policy name
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the clc account alias
:param alert_policy_name: the name of the alert policy
:return: alert_policy_id: the alert policy id
"""
alert_policy_id = None
policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
if not policies:
return alert_policy_id
for policy in policies.get('items'):
if policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _delete_servers(module, clc, server_ids):
"""
Delete the servers on the provided list
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:param server_ids: list of servers to delete
:return: a list of dictionaries with server information about the servers that were deleted
"""
terminated_server_ids = []
server_dict_array = []
request_list = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = clc.v2.Servers(server_ids).Servers()
for server in servers:
if not module.check_mode:
request_list.append(server.Delete())
ClcServer._wait_for_requests(module, request_list)
for server in servers:
terminated_server_ids.append(server.id)
return True, server_dict_array, terminated_server_ids
@staticmethod
def _start_stop_servers(module, clc, server_ids):
"""
Start or Stop the servers on the provided list
:param module: the AnsibleModule object
:param clc: the clc-sdk instance to use
:param server_ids: list of servers to start or stop
:return: a list of dictionaries with server information about the servers that were started or stopped
"""
p = module.params
state = p.get('state')
changed = False
changed_servers = []
server_dict_array = []
result_server_ids = []
request_list = []
if not isinstance(server_ids, list) or len(server_ids) < 1:
return module.fail_json(
msg='server_ids should be a list of servers, aborting')
servers = clc.v2.Servers(server_ids).Servers()
for server in servers:
if server.powerState != state:
changed_servers.append(server)
if not module.check_mode:
request_list.append(
ClcServer._change_server_power_state(
module,
server,
state))
changed = True
ClcServer._wait_for_requests(module, request_list)
ClcServer._refresh_servers(module, changed_servers)
for server in set(changed_servers + servers):
try:
server.data['ipaddress'] = server.details[
'ipAddresses'][0]['internal']
server.data['publicip'] = str(
server.PublicIPs().public_ips[0])
except (KeyError, IndexError):
pass
server_dict_array.append(server.data)
result_server_ids.append(server.id)
return changed, server_dict_array, result_server_ids
@staticmethod
def _change_server_power_state(module, server, state):
"""
Change the server powerState
:param module: the module to check for intended state
:param server: the server to start or stop
:param state: the intended powerState for the server
:return: the request object from clc-sdk call
"""
result = None
try:
if state == 'started':
result = server.PowerOn()
else:
# Try to shut down the server and fall back to power off when unable to shut down.
result = server.ShutDown()
if result and hasattr(result, 'requests') and result.requests[0]:
return result
else:
result = server.PowerOff()
except CLCException:
module.fail_json(
msg='Unable to change power state for server {0}'.format(
server.id))
return result
@staticmethod
def _find_running_servers_by_group(module, datacenter, count_group):
"""
Find a list of running servers in the provided group
:param module: the AnsibleModule object
:param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
:param count_group: the group to count the servers
:return: list of servers, and list of running servers
"""
group = ClcServer._find_group(
module=module,
datacenter=datacenter,
lookup_group=count_group)
servers = group.Servers().Servers()
running_servers = []
for server in servers:
if server.status == 'active' and server.powerState == 'started':
running_servers.append(server)
return servers, running_servers
@staticmethod
def _find_group(module, datacenter, lookup_group=None):
"""
Find a server group in a datacenter by calling the CLC API
:param module: the AnsibleModule instance
:param datacenter: clc-sdk.Datacenter instance to search for the group
:param lookup_group: string name of the group to search for
:return: clc-sdk.Group instance
"""
if not lookup_group:
lookup_group = module.params.get('group')
try:
return datacenter.Groups().Get(lookup_group)
except CLCException:
pass
        # The search above only matches the top-level groups; fall back to a
        # recursive search through all subgroups.
result = ClcServer._find_group_recursive(
module,
datacenter.Groups(),
lookup_group)
if result is None:
module.fail_json(
msg=str(
"Unable to find group: " +
lookup_group +
" in location: " +
datacenter.id))
return result
@staticmethod
def _find_group_recursive(module, group_list, lookup_group):
"""
Find a server group by recursively walking the tree
:param module: the AnsibleModule instance to use
:param group_list: a list of groups to search
:param lookup_group: the group to look for
        :return: clc-sdk.Group instance or None
"""
result = None
for group in group_list.groups:
subgroups = group.Subgroups()
try:
return subgroups.Get(lookup_group)
except CLCException:
result = ClcServer._find_group_recursive(
module,
subgroups,
lookup_group)
if result is not None:
break
return result
@staticmethod
def _create_clc_server(
clc,
module,
server_params):
"""
Call the CLC Rest API to Create a Server
:param clc: the clc-python-sdk instance to use
:param module: the AnsibleModule instance to use
:param server_params: a dictionary of params to use to create the servers
:return: clc-sdk.Request object linked to the queued server request
"""
try:
res = clc.v2.API.Call(
method='POST',
url='servers/%s' %
(server_params.get('alias')),
payload=json.dumps(
{
'name': server_params.get('name'),
'description': server_params.get('description'),
'groupId': server_params.get('group_id'),
'sourceServerId': server_params.get('template'),
'isManagedOS': server_params.get('managed_os'),
'primaryDNS': server_params.get('primary_dns'),
'secondaryDNS': server_params.get('secondary_dns'),
'networkId': server_params.get('network_id'),
'ipAddress': server_params.get('ip_address'),
'password': server_params.get('password'),
'sourceServerPassword': server_params.get('source_server_password'),
'cpu': server_params.get('cpu'),
'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
'memoryGB': server_params.get('memory'),
'type': server_params.get('type'),
'storageType': server_params.get('storage_type'),
'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
'customFields': server_params.get('custom_fields'),
'additionalDisks': server_params.get('additional_disks'),
'ttl': server_params.get('ttl'),
'packages': server_params.get('packages'),
'configurationId': server_params.get('configuration_id'),
'osType': server_params.get('os_type')}))
result = clc.v2.Requests(res)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
server_params.get('name'),
ex.response_text
))
#
# Patch the Request object so that it returns a valid server
# Find the server's UUID from the API response
server_uuid = [obj['id']
for obj in res['links'] if obj['rel'] == 'self'][0]
    # Replace the request's Server method with a closure over
    # _find_server_by_uuid_w_retry so callers can fetch the provisioned server
result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
clc,
module,
server_uuid,
server_params.get('alias'))
return result
@staticmethod
def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
"""
        Retrieve the anti affinity policy id for the given policy name
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
alias, ex.response_text))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
return aa_policy_id
#
# This is the function that gets patched to the Request.Server method using a lambda closure
#
@staticmethod
def _find_server_by_uuid_w_retry(
clc, module, svr_uuid, alias=None, retries=5, back_out=2):
"""
Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param svr_uuid: UUID of the server
    :param alias: the Account Alias to search
    :param retries: the number of retry attempts to make prior to fail. default is 5
    :param back_out: initial seconds to sleep between retries; doubled after each attempt
:return: a clc-sdk.Server instance
"""
if not alias:
alias = clc.v2.Account.GetAlias()
# Wait and retry if the api returns a 404
while True:
retries -= 1
try:
server_obj = clc.v2.API.Call(
method='GET', url='servers/%s/%s?uuid=true' %
(alias, svr_uuid))
server_id = server_obj['id']
server = clc.v2.Server(
id=server_id,
alias=alias,
server_obj=server_obj)
return server
except APIFailedResponse as e:
if e.response_status_code != 404:
return module.fail_json(
msg='A failure response was received from CLC API when '
'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
(svr_uuid, e.response_status_code, e.message))
if retries == 0:
return module.fail_json(
msg='Unable to reach the CLC API after 5 attempts')
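            # Exponential back-off between retries: wait 2s, then 4s, 8s, ...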
time.sleep(back_out)
back_out *= 2
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
argument_dict = ClcServer._define_module_argument_spec()
module = AnsibleModule(supports_check_mode=True, **argument_dict)
clc_server = ClcServer(module)
clc_server.process_request()
if __name__ == '__main__':
main()
|
css-iter/cs-studio | refs/heads/master | build/org.csstudio.validation.scripts/check_dependencies.py | 3 | '''
Created on Oct 3, 2014
The goal is to scan the features and plugins in the core and applications to ensure that:
1. no plugin is included by more than one feature
@author: Kunal Shroff
'''
import os.path
import xml.etree.ElementTree as ET
from xml.dom import minidom
import sys
from optparse import OptionParser
'''since ElementTree does not allow easy access to the name space, simply setting it as a variable'''
xmlns="{http://maven.apache.org/POM/4.0.0}"
def readFeatures(repoDir, includeRAP=False):
'''
Read the all feature.xml
{id:
{id:'feature.id',
file:'complete file path'
plugins:['plugin.ids']
includes:['included.features']
}
}
'''
features = {}
for dirpath, dirnames, filenames in os.walk(os.path.normpath(repoDir)):
for completefilename in [ os.path.join(dirpath, f) for f in filenames if f.endswith("feature.xml") and 'products' not in dirpath]:
xmldoc = minidom.parse(completefilename)
id = ''
            for feature in xmldoc.getElementsByTagName('feature'):
                id = feature.getAttribute('id')
                if includeRAP or 'rap' not in id:
                    plugins = []
                    for node in feature.getElementsByTagName('plugin'):
                        plugins.append(node.getAttribute('id'))
                    includes = []
                    for node in feature.getElementsByTagName('includes'):
                        includes.append(node.getAttribute('id'))
                    features[id] = {'id':id, 'file':completefilename, 'plugins':plugins, 'includes':includes}
return features
def ifNoneReturnDefault(object, default):
'''
if the object is None or empty string then this function returns the default value
'''
    if object is None or object == '':
return default
else:
return object
if __name__ == '__main__':
    repoDir = r'C:\git\cs-studio-organization-master\cs-studio'
    usage = r'usage: %prog -r C:\git\cs-studio'
parser = OptionParser(usage=usage)
parser.add_option('-r', '--repoDir', \
action='store', type='string', dest='repoDir', \
help='the repoDir')
parser.add_option('-i', '--includeRAP', \
action='store_true', dest='includeRAP', default=False, \
help='include RAP features')
opts, args = parser.parse_args()
repoDir = ifNoneReturnDefault(opts.repoDir, repoDir)
includeRAP = ifNoneReturnDefault(opts.includeRAP, False)
issues = []
'''all the features in the cs-studio repository'''
allFeatures = readFeatures(repoDir, includeRAP)
'''
Check
    1. that no plugin is included in more than one feature
'''
uniquePlugins = set()
duplicatePlugins = set()
for feature in allFeatures:
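        # Any plugin already seen in a previous feature is a duplicate; the
        # symmetric-difference update then keeps uniquePlugins equal to the
        # union of all plugins seen so far.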
duplicatePlugins.update((set(uniquePlugins) & set(allFeatures[feature]['plugins'])))
uniquePlugins.update(set(uniquePlugins) ^ set(allFeatures[feature]['plugins']))
duplicatePluginMap = {}
for d in duplicatePlugins:
duplicatePluginMap[d] = []
for f in allFeatures:
if d in allFeatures[f]['plugins']:
duplicatePluginMap[d].append(f)
issues.append('Duplicate inclusion of plugin: ' + d + ' in features: ' + str(duplicatePluginMap[d]))
issues.sort()
for issue in issues:
print issue
if len(issues) != 0:
sys.exit(-1)
|
CalebM1987/serverAdminTools | refs/heads/master | serverAdminTools/restapi/requests/packages/urllib3/contrib/ntlmpool.py | 514 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
        authurl is an arbitrary URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
self.num_connections, self.host, self.authurl)
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', reshdr)
log.debug('Response data: %s [...]', res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s', headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s', res.status, res.reason)
log.debug('Response headers: %s', dict(res.getheaders()))
log.debug('Response data: %s [...]', res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
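# A minimal usage sketch (not from the upstream urllib3 docs): the host,
# credentials and paths below are illustrative placeholders only.
#
#   pool = NTLMConnectionPool('EXAMPLE\\user', 'secret', authurl='/login',
#                             host='intranet.example.com', port=443)
#   response = pool.urlopen('GET', '/protected/resource')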
|
tmpgit/intellij-community | refs/heads/master | python/testData/resolve/multiFile/nestedPackage/NestedPackage.py | 83 | import foo.bar
# <ref>
|
CapOM/ChromiumGStreamerBackend | refs/heads/master | tools/telemetry/third_party/gsutilz/third_party/protorpc/protorpc/webapp/google_imports.py | 21 | """Dynamically decide from where to import other SDK modules.
All other protorpc.webapp code should import other SDK modules from
this module. If necessary, add new imports here (in both places).
"""
__author__ = 'yey@google.com (Ye Yuan)'
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import os
import sys
try:
from google.appengine import ext
normal_environment = True
except ImportError:
normal_environment = False
if normal_environment:
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util as webapp_util
from google.appengine.ext.webapp import template
|
alajfit/ngbook_tests | refs/heads/master | node_modules/karma/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
if codePoint >= 0x0000 and codePoint <= 0xFFFF:
return unichr(codePoint)
elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
return unichr(highSurrogate) + unichr(lowSurrogate)
else:
return 'Error'
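# Worked example of the surrogate formulae above: U+1D306 lies beyond the
# Basic Multilingual Plane, so it is returned as the surrogate pair
# U+D834 U+DF06:
#   high = int((0x1D306 - 0x10000) / 0x400) + 0xD800 = 0xD834
#   low  =     (0x1D306 - 0x10000) % 0x400  + 0xDC00 = 0xDF06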
def hexify(codePoint):
return 'U+' + hex(codePoint)[2:].upper().zfill(6)
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
# Skip non-scalar values.
if codePoint >= 0xD800 and codePoint <= 0xDFFF:
continue
symbol = unisymbol(codePoint)
# http://stackoverflow.com/a/17199950/96656
bytes = symbol.encode('utf8').decode('latin1')
data.append({
'codePoint': codePoint,
'decoded': symbol,
'encoded': bytes
    })
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace(' ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
r'\\u([a-fA-F0-9]{4})',
lambda match: r'\u{}'.format(match.group(1).upper()),
jsonData
)
writeFile('data.json', jsonData)
|
waylan/django-spambayes | refs/heads/master | urls.py | 2 | """ Sample urls.py for the demo app. """
from django.conf.urls.defaults import *
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/(.*)', admin.site.root),
(r'^sb_demo/', include('sb_demo.urls')),
(r'^comments/', include('django.contrib.comments.urls')),
)
|
kawamon/hue | refs/heads/master | desktop/core/ext-py/Django-1.11.29/tests/null_queries/tests.py | 55 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
def test_none_as_null(self):
"""
Regression test for the use of None as a query value.
None is interpreted as an SQL NULL, but only in __exact and __iexact
queries.
Set up some initial polls and choices
"""
p1 = Poll(question='Why?')
p1.save()
c1 = Choice(poll=p1, choice='Because.')
c1.save()
c2 = Choice(poll=p1, choice='Why Not?')
c2.save()
# Exact query with value None returns nothing ("is NULL" in sql,
# but every 'id' field has a value).
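        # Compiles roughly to: SELECT ... WHERE choice IS NULL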
self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
# The same behavior for iexact query.
self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
# Excluding the previous result returns everything.
self.assertQuerysetEqual(
Choice.objects.exclude(choice=None).order_by('id'),
[
'<Choice: Choice: Because. in poll Q: Why? >',
'<Choice: Choice: Why Not? in poll Q: Why? >'
]
)
# Valid query, but fails because foo isn't a keyword
with self.assertRaises(FieldError):
Choice.objects.filter(foo__exact=None)
# Can't use None on anything other than __exact and __iexact
with self.assertRaises(ValueError):
Choice.objects.filter(id__gt=None)
# Related managers use __exact=None implicitly if the object hasn't been saved.
p2 = Poll(question="How?")
self.assertEqual(repr(p2.choice_set.all()), '<QuerySet []>')
def test_reverse_relations(self):
"""
Querying across reverse relations and then another relation should
insert outer joins correctly so as not to exclude results.
"""
obj = OuterA.objects.create()
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third=None),
['<OuterA: OuterA object>']
)
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third__data=None),
['<OuterA: OuterA object>']
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
Inner.objects.filter(first__inner__third=None),
['<Inner: Inner object>']
)
# Ticket #13815: check if <reverse>_isnull=False does not produce
# faulty empty lists
OuterB.objects.create(data="reverse")
self.assertQuerysetEqual(
OuterB.objects.filter(inner__isnull=False),
[]
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
OuterB.objects.exclude(inner__isnull=False),
['<OuterB: OuterB object>']
)
|
dantebarba/docker-media-server | refs/heads/master | plex/Sub-Zero.bundle/Contents/Libraries/Shared/appdirs.py | 335 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 0)
__version__ = '.'.join(map(str, __version_info__))
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
    Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
    Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
    That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
    Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
    Typical user log directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
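# A minimal usage sketch of the AppDirs wrapper (example names; the paths
# shown are the Linux/XDG defaults computed by the functions above):
#   dirs = AppDirs("SuperApp", "Acme", version="1.0")
#   dirs.user_data_dir   # -> ~/.local/share/SuperApp/1.0
#   dirs.user_cache_dir  # -> ~/.cache/SuperApp/1.0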
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
|
antoan2/incubator-mxnet | refs/heads/master | example/bi-lstm-sort/gen_data.py | 39 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import random
vocab = [str(x) for x in range(100, 1000)]
sw_train = open("sort.train.txt", "w")
sw_test = open("sort.test.txt", "w")
sw_valid = open("sort.valid.txt", "w")
for i in range(1000000):
seq = " ".join([vocab[random.randint(0, len(vocab) - 1)] for j in range(5)])
k = i % 50
if k == 0:
sw_test.write(seq + "\n")
elif k == 1:
sw_valid.write(seq + "\n")
else:
sw_train.write(seq + "\n")
sw_train.close()
sw_test.close()
sw_valid.close()
|
suxinde2009/zulip | refs/heads/master | zerver/lib/socket.py | 121 | from __future__ import absolute_import
from django.conf import settings
from django.utils.importlib import import_module
from django.utils import timezone
from django.contrib.sessions.models import Session as djSession
import sockjs.tornado
import tornado.ioloop
import ujson
import logging
import time
from zerver.models import UserProfile, get_user_profile_by_id, get_client
from zerver.lib.queue import queue_json_publish
from zerver.lib.actions import check_send_message, extract_recipients
from zerver.decorator import JsonableError
from zerver.lib.utils import statsd
from zerver.lib.event_queue import get_client_descriptor
from zerver.middleware import record_request_start_data, record_request_stop_data, \
record_request_restart_data, write_log_line, format_timedelta
from zerver.lib.redis_utils import get_redis_client
from zerver.lib.session_user import get_session_user
logger = logging.getLogger('zulip.socket')
djsession_engine = import_module(settings.SESSION_ENGINE)
def get_user_profile(session_id):
if session_id is None:
return None
try:
djsession = djSession.objects.get(expire_date__gt=timezone.now(),
session_key=session_id)
except djSession.DoesNotExist:
return None
try:
return UserProfile.objects.get(pk=get_session_user(djsession))
except (UserProfile.DoesNotExist, KeyError):
return None
connections = dict()
def get_connection(id):
return connections.get(id)
def register_connection(id, conn):
# Kill any old connections if they exist
if id in connections:
connections[id].close()
conn.client_id = id
connections[conn.client_id] = conn
def deregister_connection(conn):
del connections[conn.client_id]
redis_client = get_redis_client()
def req_redis_key(req_id):
return 'socket_req_status:%s' % (req_id,)
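# For illustration (hypothetical id): req_redis_key('abc123') returns
# 'socket_req_status:abc123'. The hash stored under that key holds a 'status'
# field and, once a response exists, a serialized 'response' entry (see the
# status_inquiries handling in SocketConnection.authenticate_client).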
class SocketAuthError(Exception):
def __init__(self, msg):
self.msg = msg
class CloseErrorInfo(object):
def __init__(self, status_code, err_msg):
self.status_code = status_code
self.err_msg = err_msg
class SocketConnection(sockjs.tornado.SockJSConnection):
def on_open(self, info):
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
ioloop = tornado.ioloop.IOLoop.instance()
self.authenticated = False
self.session.user_profile = None
self.close_info = None
self.did_close = False
try:
self.browser_session_id = info.get_cookie(settings.SESSION_COOKIE_NAME).value
self.csrf_token = info.get_cookie(settings.CSRF_COOKIE_NAME).value
except AttributeError:
# The request didn't contain the necessary cookie values. We can't
# close immediately because sockjs-tornado doesn't expect a close
# inside on_open(), so do it on the next tick.
self.close_info = CloseErrorInfo(403, "Initial cookie lacked required values")
ioloop.add_callback(self.close)
return
def auth_timeout():
self.close_info = CloseErrorInfo(408, "Timeout while waiting for authentication")
self.close()
self.timeout_handle = ioloop.add_timeout(time.time() + 10, auth_timeout)
write_log_line(log_data, path='/socket/open', method='SOCKET',
remote_ip=info.ip, email='unknown', client_name='?')
def authenticate_client(self, msg):
if self.authenticated:
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'error', 'msg': 'Already authenticated'}})
return
user_profile = get_user_profile(self.browser_session_id)
if user_profile is None:
raise SocketAuthError('Unknown or missing session')
self.session.user_profile = user_profile
if msg['request']['csrf_token'] != self.csrf_token:
raise SocketAuthError('CSRF token does not match that in cookie')
        if 'queue_id' not in msg['request']:
raise SocketAuthError("Missing 'queue_id' argument")
queue_id = msg['request']['queue_id']
client = get_client_descriptor(queue_id)
if client is None:
raise SocketAuthError('Bad event queue id: %s' % (queue_id,))
if user_profile.id != client.user_profile_id:
raise SocketAuthError("You are not the owner of the queue with id '%s'" % (queue_id,))
self.authenticated = True
register_connection(queue_id, self)
response = {'req_id': msg['req_id'], 'type': 'response',
'response': {'result': 'success', 'msg': ''}}
status_inquiries = msg['request'].get('status_inquiries')
if status_inquiries is not None:
results = {}
for inquiry in status_inquiries:
status = redis_client.hgetall(req_redis_key(inquiry))
if len(status) == 0:
status['status'] = 'not_received'
if 'response' in status:
status['response'] = ujson.loads(status['response'])
results[str(inquiry)] = status
response['response']['status_inquiries'] = results
self.session.send_message(response)
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self.timeout_handle)
def on_message(self, msg):
log_data = dict(extra='[transport=%s' % (self.session.transport_name,))
record_request_start_data(log_data)
msg = ujson.loads(msg)
if self.did_close:
logger.info("Received message on already closed socket! transport=%s user=%s client_id=%s"
% (self.session.transport_name,
self.session.user_profile.email if self.session.user_profile is not None else 'unknown',
self.client_id))
self.session.send_message({'req_id': msg['req_id'], 'type': 'ack'})
if msg['type'] == 'auth':
log_data['extra'] += ']'
try:
self.authenticate_client(msg)
# TODO: Fill in the correct client
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email=self.session.user_profile.email,
client_name='?')
except SocketAuthError as e:
response = {'result': 'error', 'msg': e.msg}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/auth', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
else:
if not self.authenticated:
response = {'result': 'error', 'msg': "Not yet authenticated"}
self.session.send_message({'req_id': msg['req_id'], 'type': 'response',
'response': response})
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=self.session.conn_info.ip,
email='unknown', client_name='?',
status_code=403, error_content=ujson.dumps(response))
return
redis_key = req_redis_key(msg['req_id'])
with redis_client.pipeline() as pipeline:
pipeline.hmset(redis_key, {'status': 'received'})
pipeline.expire(redis_key, 60 * 60 * 24)
pipeline.execute()
record_request_stop_data(log_data)
queue_json_publish("message_sender",
dict(request=msg['request'],
req_id=msg['req_id'],
server_meta=dict(user_id=self.session.user_profile.id,
client_id=self.client_id,
return_queue="tornado_return",
log_data=log_data,
request_environ=dict(REMOTE_ADDR=self.session.conn_info.ip))),
fake_message_sender)
def on_close(self):
log_data = dict(extra='[transport=%s]' % (self.session.transport_name,))
record_request_start_data(log_data)
if self.close_info is not None:
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email='unknown',
client_name='?', status_code=self.close_info.status_code,
error_content=self.close_info.err_msg)
else:
deregister_connection(self)
email = self.session.user_profile.email \
if self.session.user_profile is not None else 'unknown'
write_log_line(log_data, path='/socket/close', method='SOCKET',
remote_ip=self.session.conn_info.ip, email=email,
client_name='?')
self.did_close = True
def fake_message_sender(event):
log_data = dict()
record_request_start_data(log_data)
req = event['request']
try:
sender = get_user_profile_by_id(event['server_meta']['user_id'])
client = get_client(req['client'])
msg_id = check_send_message(sender, client, req['type'],
extract_recipients(req['to']),
req['subject'], req['content'],
local_id=req.get('local_id', None),
sender_queue_id=req.get('queue_id', None))
resp = {"result": "success", "msg": "", "id": msg_id}
except JsonableError as e:
resp = {"result": "error", "msg": str(e)}
server_meta = event['server_meta']
server_meta.update({'worker_log_data': log_data,
'time_request_finished': time.time()})
result = {'response': resp, 'req_id': event['req_id'],
'server_meta': server_meta}
respond_send_message(result)
def respond_send_message(data):
log_data = data['server_meta']['log_data']
record_request_restart_data(log_data)
worker_log_data = data['server_meta']['worker_log_data']
forward_queue_delay = worker_log_data['time_started'] - log_data['time_stopped']
return_queue_delay = log_data['time_restarted'] - data['server_meta']['time_request_finished']
service_time = data['server_meta']['time_request_finished'] - worker_log_data['time_started']
log_data['extra'] += ', queue_delay: %s/%s, service_time: %s]' % (
format_timedelta(forward_queue_delay), format_timedelta(return_queue_delay),
format_timedelta(service_time))
client_id = data['server_meta']['client_id']
connection = get_connection(client_id)
if connection is None:
logger.info("Could not find connection to send response to! client_id=%s" % (client_id,))
else:
connection.session.send_message({'req_id': data['req_id'], 'type': 'response',
'response': data['response']})
# TODO: Fill in client name
# TODO: Maybe fill in the status code correctly
write_log_line(log_data, path='/socket/service_request', method='SOCKET',
remote_ip=connection.session.conn_info.ip,
email=connection.session.user_profile.email, client_name='?')
# We disable the eventsource and htmlfile transports because they cannot
# securely send us the zulip.com cookie, which we use as part of our
# authentication scheme.
sockjs_router = sockjs.tornado.SockJSRouter(SocketConnection, "/sockjs",
{'sockjs_url': 'https://%s/static/third/sockjs/sockjs-0.3.4.js' % (settings.EXTERNAL_HOST,),
'disabled_transports': ['eventsource', 'htmlfile']})
def get_sockjs_router():
return sockjs_router
|
zisko/swift | refs/heads/master | utils/build_swift/tests/utils.py | 2 | # This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import os
import sys
import unittest
from contextlib import contextmanager
__all__ = [
'redirect_stderr',
'redirect_stdout',
'TestCase',
]
# -----------------------------------------------------------------------------
@contextmanager
def redirect_stderr(stream=None):
stream = stream or StringIO()
old_stderr, sys.stderr = sys.stderr, stream
try:
yield stream
finally:
sys.stderr = old_stderr
@contextmanager
def redirect_stdout(stream=None):
stream = stream or StringIO()
old_stdout, sys.stdout = sys.stdout, stream
try:
yield stream
finally:
sys.stdout = old_stdout
# -----------------------------------------------------------------------------
class TestCase(unittest.TestCase):
@contextmanager
def quietOutput(self):
with open(os.devnull, 'w') as devnull:
with redirect_stderr(devnull), redirect_stdout(devnull):
yield
@contextmanager
def assertNotRaises(self, exception=BaseException):
assert issubclass(exception, BaseException)
try:
yield
except exception as e:
message = '{} raised: {}'.format(exception.__name__, str(e))
raise self.failureException(message)
|
python-visualization/folium | refs/heads/master | folium/plugins/__init__.py | 2 | # -*- coding: utf-8 -*-
"""
Folium plugins
--------------
Wrap some of the most popular leaflet external plugins.
"""
from folium.plugins.antpath import AntPath
from folium.plugins.polyline_offset import PolyLineOffset
from folium.plugins.beautify_icon import BeautifyIcon
from folium.plugins.boat_marker import BoatMarker
from folium.plugins.draw import Draw
from folium.plugins.dual_map import DualMap
from folium.plugins.fast_marker_cluster import FastMarkerCluster
from folium.plugins.feature_group_sub_group import FeatureGroupSubGroup
from folium.plugins.float_image import FloatImage
from folium.plugins.fullscreen import Fullscreen
from folium.plugins.geocoder import Geocoder
from folium.plugins.heat_map import HeatMap
from folium.plugins.heat_map_withtime import HeatMapWithTime
from folium.plugins.locate_control import LocateControl
from folium.plugins.marker_cluster import MarkerCluster
from folium.plugins.measure_control import MeasureControl
from folium.plugins.minimap import MiniMap
from folium.plugins.mouse_position import MousePosition
from folium.plugins.pattern import CirclePattern, StripePattern
from folium.plugins.polyline_text_path import PolyLineTextPath
from folium.plugins.scroll_zoom_toggler import ScrollZoomToggler
from folium.plugins.search import Search
from folium.plugins.semicircle import SemiCircle
from folium.plugins.terminator import Terminator
from folium.plugins.time_slider_choropleth import TimeSliderChoropleth
from folium.plugins.timestamped_geo_json import TimestampedGeoJson
from folium.plugins.timestamped_wmstilelayer import TimestampedWmsTileLayers
__all__ = [
'AntPath',
'BeautifyIcon',
'BoatMarker',
'CirclePattern',
'Draw',
'DualMap',
'FastMarkerCluster',
'FeatureGroupSubGroup',
'FloatImage',
'Fullscreen',
'Geocoder',
'HeatMap',
'HeatMapWithTime',
'LocateControl',
'MarkerCluster',
'MeasureControl',
'MiniMap',
'MousePosition',
'PolyLineTextPath',
'PolyLineOffset',
'ScrollZoomToggler',
'Search',
'SemiCircle',
'StripePattern',
'Terminator',
'TimeSliderChoropleth',
'TimestampedGeoJson',
'TimestampedWmsTileLayers',
]
|
Deepak345/al-go-rithms | refs/heads/master | math/average_stream_numbers/Python/average_stream_numbers.py | 3 | def newAvg(prevAvg, newN, newX) :
return ((prevAvg * (newN - 1) + newX) / newN)
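# Why the update works (a sketch): the previous newN - 1 values sum to
# prevAvg * (newN - 1); adding newX and dividing by the new count newN gives
# the running mean without storing the whole stream. For the list in main(),
# the successive averages are 1.0, 5.0, 10.0, 10.75, 17.6.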
def main() :
L = [1, 9, 20, 13, 45]
avg = 0
for i in range(len(L)) :
avg = newAvg(avg, (i + 1), L[i])
print(avg)
main()
|
MalloyPower/parsing-python | refs/heads/master | front-end/testsuite-python-lib/Python-2.7.2/Lib/plat-mac/lib-scriptpackages/Finder/__init__.py | 73 | """
Package generated from /System/Library/CoreServices/Finder.app
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the Finder package is removed.", stacklevel=2)
import aetools
Error = aetools.Error
import Standard_Suite
import Legacy_suite
import Containers_and_folders
import Files
import Finder_Basics
import Finder_items
import Window_classes
import Type_Definitions
import Enumerations
_code_to_module = {
'CoRe' : Standard_Suite,
'fleg' : Legacy_suite,
'fndr' : Containers_and_folders,
'fndr' : Files,
'fndr' : Finder_Basics,
'fndr' : Finder_items,
'fndr' : Window_classes,
'tpdf' : Type_Definitions,
'tpnm' : Enumerations,
}
_code_to_fullname = {
'CoRe' : ('Finder.Standard_Suite', 'Standard_Suite'),
'fleg' : ('Finder.Legacy_suite', 'Legacy_suite'),
'fndr' : ('Finder.Containers_and_folders', 'Containers_and_folders'),
'fndr' : ('Finder.Files', 'Files'),
'fndr' : ('Finder.Finder_Basics', 'Finder_Basics'),
'fndr' : ('Finder.Finder_items', 'Finder_items'),
'fndr' : ('Finder.Window_classes', 'Window_classes'),
'tpdf' : ('Finder.Type_Definitions', 'Type_Definitions'),
'tpnm' : ('Finder.Enumerations', 'Enumerations'),
}
from Standard_Suite import *
from Legacy_suite import *
from Containers_and_folders import *
from Files import *
from Finder_Basics import *
from Finder_items import *
from Window_classes import *
from Type_Definitions import *
from Enumerations import *
def getbaseclasses(v):
if not getattr(v, '_propdict', None):
v._propdict = {}
v._elemdict = {}
for superclassname in getattr(v, '_superclassnames', []):
superclass = eval(superclassname)
getbaseclasses(superclass)
v._propdict.update(getattr(superclass, '_propdict', {}))
v._elemdict.update(getattr(superclass, '_elemdict', {}))
v._propdict.update(getattr(v, '_privpropdict', {}))
v._elemdict.update(getattr(v, '_privelemdict', {}))
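# getbaseclasses merges each superclass's _propdict/_elemdict first, so a
# class's own _privpropdict/_privelemdict entries override inherited ones.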
import StdSuites
#
# Set property and element dictionaries now that all classes have been defined
#
getbaseclasses(StdSuites.Type_Names_Suite.small_integer)
getbaseclasses(StdSuites.Type_Names_Suite.system_dictionary)
getbaseclasses(StdSuites.Type_Names_Suite.color_table)
getbaseclasses(StdSuites.Type_Names_Suite.fixed_point)
getbaseclasses(StdSuites.Type_Names_Suite.string)
getbaseclasses(StdSuites.Type_Names_Suite.type_element_info)
getbaseclasses(StdSuites.Type_Names_Suite.machine_location)
getbaseclasses(StdSuites.Type_Names_Suite.PostScript_picture)
getbaseclasses(StdSuites.Type_Names_Suite.type_property_info)
getbaseclasses(StdSuites.Type_Names_Suite.menu_item)
getbaseclasses(StdSuites.Type_Names_Suite.scrap_styles)
getbaseclasses(StdSuites.Type_Names_Suite.fixed_rectangle)
getbaseclasses(StdSuites.Type_Names_Suite.null)
getbaseclasses(StdSuites.Type_Names_Suite.type_event_info)
getbaseclasses(StdSuites.Type_Names_Suite.rotation)
getbaseclasses(StdSuites.Type_Names_Suite.long_fixed_rectangle)
getbaseclasses(StdSuites.Type_Names_Suite.long_point)
getbaseclasses(StdSuites.Type_Names_Suite.target_id)
getbaseclasses(StdSuites.Type_Names_Suite.type_suite_info)
getbaseclasses(StdSuites.Type_Names_Suite.type_parameter_info)
getbaseclasses(StdSuites.Type_Names_Suite.long_fixed_point)
getbaseclasses(StdSuites.Type_Names_Suite.bounding_rectangle)
getbaseclasses(StdSuites.Type_Names_Suite.TIFF_picture)
getbaseclasses(StdSuites.Type_Names_Suite.long_fixed)
getbaseclasses(StdSuites.Type_Names_Suite.version)
getbaseclasses(StdSuites.Type_Names_Suite.RGB16_color)
getbaseclasses(StdSuites.Type_Names_Suite.double_integer)
getbaseclasses(StdSuites.Type_Names_Suite.location_reference)
getbaseclasses(StdSuites.Type_Names_Suite.point)
getbaseclasses(StdSuites.Type_Names_Suite.application_dictionary)
getbaseclasses(StdSuites.Type_Names_Suite.unsigned_integer)
getbaseclasses(StdSuites.Type_Names_Suite.menu)
getbaseclasses(StdSuites.Type_Names_Suite.small_real)
getbaseclasses(StdSuites.Type_Names_Suite.fixed)
getbaseclasses(StdSuites.Type_Names_Suite.type_class_info)
getbaseclasses(StdSuites.Type_Names_Suite.RGB96_color)
getbaseclasses(StdSuites.Type_Names_Suite.dash_style)
getbaseclasses(StdSuites.Type_Names_Suite.pixel_map_record)
getbaseclasses(StdSuites.Type_Names_Suite.extended_real)
getbaseclasses(StdSuites.Type_Names_Suite.long_rectangle)
getbaseclasses(process)
getbaseclasses(application_process)
getbaseclasses(desk_accessory_process)
getbaseclasses(application)
getbaseclasses(trash_2d_object)
getbaseclasses(desktop_2d_object)
getbaseclasses(container)
getbaseclasses(folder)
getbaseclasses(disk)
getbaseclasses(application)
getbaseclasses(alias_file)
getbaseclasses(package)
getbaseclasses(file)
getbaseclasses(application_file)
getbaseclasses(internet_location_file)
getbaseclasses(document_file)
getbaseclasses(clipping)
getbaseclasses(preferences_window)
getbaseclasses(Finder_window)
getbaseclasses(window)
getbaseclasses(clipping_window)
getbaseclasses(information_window)
getbaseclasses(item)
getbaseclasses(icon_view_options)
getbaseclasses(preferences)
getbaseclasses(alias_list)
getbaseclasses(icon_family)
getbaseclasses(label)
getbaseclasses(column)
getbaseclasses(list_view_options)
#
# Indices of types declared in this module
#
_classdeclarations = {
'shor' : StdSuites.Type_Names_Suite.small_integer,
'aeut' : StdSuites.Type_Names_Suite.system_dictionary,
'clrt' : StdSuites.Type_Names_Suite.color_table,
'fpnt' : StdSuites.Type_Names_Suite.fixed_point,
'TEXT' : StdSuites.Type_Names_Suite.string,
'elin' : StdSuites.Type_Names_Suite.type_element_info,
'mLoc' : StdSuites.Type_Names_Suite.machine_location,
'EPS ' : StdSuites.Type_Names_Suite.PostScript_picture,
'pinf' : StdSuites.Type_Names_Suite.type_property_info,
'cmen' : StdSuites.Type_Names_Suite.menu_item,
'styl' : StdSuites.Type_Names_Suite.scrap_styles,
'frct' : StdSuites.Type_Names_Suite.fixed_rectangle,
'null' : StdSuites.Type_Names_Suite.null,
'evin' : StdSuites.Type_Names_Suite.type_event_info,
'trot' : StdSuites.Type_Names_Suite.rotation,
'lfrc' : StdSuites.Type_Names_Suite.long_fixed_rectangle,
'lpnt' : StdSuites.Type_Names_Suite.long_point,
'targ' : StdSuites.Type_Names_Suite.target_id,
'suin' : StdSuites.Type_Names_Suite.type_suite_info,
'pmin' : StdSuites.Type_Names_Suite.type_parameter_info,
'lfpt' : StdSuites.Type_Names_Suite.long_fixed_point,
'qdrt' : StdSuites.Type_Names_Suite.bounding_rectangle,
'TIFF' : StdSuites.Type_Names_Suite.TIFF_picture,
'lfxd' : StdSuites.Type_Names_Suite.long_fixed,
'vers' : StdSuites.Type_Names_Suite.version,
'tr16' : StdSuites.Type_Names_Suite.RGB16_color,
'comp' : StdSuites.Type_Names_Suite.double_integer,
'insl' : StdSuites.Type_Names_Suite.location_reference,
'QDpt' : StdSuites.Type_Names_Suite.point,
'aete' : StdSuites.Type_Names_Suite.application_dictionary,
'magn' : StdSuites.Type_Names_Suite.unsigned_integer,
'cmnu' : StdSuites.Type_Names_Suite.menu,
'sing' : StdSuites.Type_Names_Suite.small_real,
'fixd' : StdSuites.Type_Names_Suite.fixed,
'gcli' : StdSuites.Type_Names_Suite.type_class_info,
'tr96' : StdSuites.Type_Names_Suite.RGB96_color,
'tdas' : StdSuites.Type_Names_Suite.dash_style,
'tpmm' : StdSuites.Type_Names_Suite.pixel_map_record,
'exte' : StdSuites.Type_Names_Suite.extended_real,
'lrct' : StdSuites.Type_Names_Suite.long_rectangle,
'prcs' : process,
'pcap' : application_process,
'pcda' : desk_accessory_process,
'capp' : application,
'ctrs' : trash_2d_object,
'cdsk' : desktop_2d_object,
'ctnr' : container,
'cfol' : folder,
'cdis' : disk,
'capp' : application,
'alia' : alias_file,
'pack' : package,
'file' : file,
'appf' : application_file,
'inlf' : internet_location_file,
'docf' : document_file,
'clpf' : clipping,
'pwnd' : preferences_window,
'brow' : Finder_window,
'cwin' : window,
'lwnd' : clipping_window,
'iwnd' : information_window,
'cobj' : item,
'icop' : icon_view_options,
'cprf' : preferences,
'alst' : alias_list,
'ifam' : icon_family,
'clbl' : label,
'lvcl' : column,
'lvop' : list_view_options,
}
class Finder(Standard_Suite_Events,
Legacy_suite_Events,
Containers_and_folders_Events,
Files_Events,
Finder_Basics_Events,
Finder_items_Events,
Window_classes_Events,
Type_Definitions_Events,
Enumerations_Events,
aetools.TalkTo):
_signature = 'MACS'
_moduleName = 'Finder'
_elemdict = application._elemdict
_propdict = application._propdict
|
laurentb/weboob | refs/heads/master | modules/mailinator/test.py | 2 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Vincent A
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class MailinatorTest(BackendTest):
MODULE = 'mailinator'
def test_mailinator(self):
t = self.backend.get_thread('qwerty')
assert t
assert t.root
assert t.root.title
assert t.root.date
assert t.root.sender
assert t.root.receivers
self.backend.fillobj(t.root, ('content',))
assert t.root.content
|
gkoelln/youtube-dl | refs/heads/master | youtube_dl/extractor/discoveryvr.py | 59 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_duration
class DiscoveryVRIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?discoveryvr\.com/watch/(?P<id>[^/?#]+)'
_TEST = {
'url': 'http://www.discoveryvr.com/watch/discovery-vr-an-introduction',
'md5': '32b1929798c464a54356378b7912eca4',
'info_dict': {
'id': 'discovery-vr-an-introduction',
'ext': 'mp4',
'title': 'Discovery VR - An Introduction',
'description': 'md5:80d418a10efb8899d9403e61d8790f06',
}
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
bootstrap_data = self._search_regex(
r'root\.DVR\.bootstrapData\s+=\s+"({.+?})";',
webpage, 'bootstrap data')
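        # The blob is embedded in the page as an escaped JavaScript string
        # literal, so the backslash escapes are undone before JSON parsing.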
bootstrap_data = self._parse_json(
bootstrap_data.encode('utf-8').decode('unicode_escape'),
display_id)
videos = self._parse_json(bootstrap_data['videos'], display_id)['allVideos']
video_data = next(video for video in videos if video.get('slug') == display_id)
series = video_data.get('showTitle')
title = episode = video_data.get('title') or series
if series and series != title:
title = '%s - %s' % (series, title)
formats = []
for f, format_id in (('cdnUriM3U8', 'mobi'), ('webVideoUrlSd', 'sd'), ('webVideoUrlHd', 'hd')):
f_url = video_data.get(f)
if not f_url:
continue
formats.append({
'format_id': format_id,
'url': f_url,
})
return {
'id': display_id,
'display_id': display_id,
'title': title,
'description': video_data.get('description'),
'thumbnail': video_data.get('thumbnail'),
'duration': parse_duration(video_data.get('runTime')),
'formats': formats,
'episode': episode,
'series': series,
}
|
molly/twitterbot_framework | refs/heads/master | secrets.py | 2 | C_KEY = ""
C_SECRET = ""
A_TOKEN = ""
A_TOKEN_SECRET = "" |
dahlstrom-g/intellij-community | refs/heads/master | python/testData/formatter/multilineIfConditionKeywordAtStart_after.py | 22 | if foo is None \
and bar == 42:
pass
|
MQQiang/kbengine | refs/heads/master | kbe/src/lib/python/Lib/test/test_exceptions.py | 75 | # Python test set -- part 5, built-in exceptions
import os
import sys
import unittest
import pickle
import weakref
import errno
from test.support import (TESTFN, captured_output, check_impl_detail,
check_warnings, cpython_only, gc_collect, run_unittest,
no_tracing, unlink)
class NaiveException(Exception):
def __init__(self, x):
self.x = x
class SlottedNaiveException(Exception):
__slots__ = ('x',)
def __init__(self, x):
self.x = x
# XXX This is not really enough, each *operation* should be tested!
class ExceptionTests(unittest.TestCase):
def raise_catch(self, exc, excname):
try:
raise exc("spam")
except exc as err:
buf1 = str(err)
try:
raise exc("spam")
except exc as err:
buf2 = str(err)
self.assertEqual(buf1, buf2)
self.assertEqual(exc.__name__, excname)
def testRaising(self):
self.raise_catch(AttributeError, "AttributeError")
self.assertRaises(AttributeError, getattr, sys, "undefined_attribute")
self.raise_catch(EOFError, "EOFError")
fp = open(TESTFN, 'w')
fp.close()
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
try:
import marshal
marshal.loads(b'')
except EOFError:
pass
finally:
sys.stdin = savestdin
fp.close()
unlink(TESTFN)
self.raise_catch(OSError, "OSError")
self.assertRaises(OSError, open, 'this file does not exist', 'r')
self.raise_catch(ImportError, "ImportError")
self.assertRaises(ImportError, __import__, "undefined_module")
self.raise_catch(IndexError, "IndexError")
x = []
self.assertRaises(IndexError, x.__getitem__, 10)
self.raise_catch(KeyError, "KeyError")
x = {}
self.assertRaises(KeyError, x.__getitem__, 'key')
self.raise_catch(KeyboardInterrupt, "KeyboardInterrupt")
self.raise_catch(MemoryError, "MemoryError")
self.raise_catch(NameError, "NameError")
try: x = undefined_variable
except NameError: pass
self.raise_catch(OverflowError, "OverflowError")
x = 1
for dummy in range(128):
x += x # this simply shouldn't blow up
self.raise_catch(RuntimeError, "RuntimeError")
self.raise_catch(SyntaxError, "SyntaxError")
try: exec('/\n')
except SyntaxError: pass
self.raise_catch(IndentationError, "IndentationError")
self.raise_catch(TabError, "TabError")
try: compile("try:\n\t1/0\n \t1/0\nfinally:\n pass\n",
'<string>', 'exec')
except TabError: pass
else: self.fail("TabError not raised")
self.raise_catch(SystemError, "SystemError")
self.raise_catch(SystemExit, "SystemExit")
self.assertRaises(SystemExit, sys.exit, 0)
self.raise_catch(TypeError, "TypeError")
try: [] + ()
except TypeError: pass
self.raise_catch(ValueError, "ValueError")
self.assertRaises(ValueError, chr, 17<<16)
self.raise_catch(ZeroDivisionError, "ZeroDivisionError")
try: x = 1/0
except ZeroDivisionError: pass
self.raise_catch(Exception, "Exception")
try: x = 1/0
except Exception as e: pass
def testSyntaxErrorMessage(self):
# make sure the right exception message is raised for each of
# these code fragments
def ckmsg(src, msg):
try:
compile(src, '<fragment>', 'exec')
except SyntaxError as e:
if e.msg != msg:
self.fail("expected %s, got %s" % (msg, e.msg))
else:
self.fail("failed to get expected SyntaxError")
s = '''while 1:
try:
pass
finally:
continue'''
if not sys.platform.startswith('java'):
ckmsg(s, "'continue' not supported inside 'finally' clause")
s = '''if 1:
try:
continue
except:
pass'''
ckmsg(s, "'continue' not properly in loop")
ckmsg("continue\n", "'continue' not properly in loop")
def testSyntaxErrorOffset(self):
def check(src, lineno, offset):
with self.assertRaises(SyntaxError) as cm:
compile(src, '<fragment>', 'exec')
self.assertEqual(cm.exception.lineno, lineno)
self.assertEqual(cm.exception.offset, offset)
check('def fact(x):\n\treturn x!\n', 2, 10)
check('1 +\n', 1, 4)
check('def spam():\n print(1)\n print(2)', 3, 10)
check('Python = "Python" +', 1, 20)
check('Python = "\u1e54\xfd\u0163\u0125\xf2\xf1" +', 1, 20)
@cpython_only
def testSettingException(self):
# test that setting an exception at the C level works even if the
# exception object can't be constructed.
class BadException(Exception):
def __init__(self_):
raise RuntimeError("can't instantiate BadException")
class InvalidException:
pass
def test_capi1():
import _testcapi
try:
_testcapi.raise_exception(BadException, 1)
except TypeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "test_capi1")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
else:
self.fail("Expected exception")
def test_capi2():
import _testcapi
try:
_testcapi.raise_exception(BadException, 0)
except RuntimeError as err:
exc, err, tb = sys.exc_info()
co = tb.tb_frame.f_code
self.assertEqual(co.co_name, "__init__")
self.assertTrue(co.co_filename.endswith('test_exceptions.py'))
co2 = tb.tb_frame.f_back.f_code
self.assertEqual(co2.co_name, "test_capi2")
else:
self.fail("Expected exception")
def test_capi3():
import _testcapi
self.assertRaises(SystemError, _testcapi.raise_exception,
InvalidException, 1)
if not sys.platform.startswith('java'):
test_capi1()
test_capi2()
test_capi3()
def test_WindowsError(self):
try:
WindowsError
except NameError:
pass
else:
self.assertIs(WindowsError, OSError)
self.assertEqual(str(OSError(1001)), "1001")
self.assertEqual(str(OSError(1001, "message")),
"[Errno 1001] message")
# POSIX errno (9 aka EBADF) is untranslated
w = OSError(9, 'foo', 'bar')
self.assertEqual(w.errno, 9)
self.assertEqual(w.winerror, None)
self.assertEqual(str(w), "[Errno 9] foo: 'bar'")
# ERROR_PATH_NOT_FOUND (win error 3) becomes ENOENT (2)
w = OSError(0, 'foo', 'bar', 3)
self.assertEqual(w.errno, 2)
self.assertEqual(w.winerror, 3)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, 'bar')
self.assertEqual(str(w), "[WinError 3] foo: 'bar'")
# Unknown win error becomes EINVAL (22)
w = OSError(0, 'foo', None, 1001)
self.assertEqual(w.errno, 22)
self.assertEqual(w.winerror, 1001)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, None)
self.assertEqual(str(w), "[WinError 1001] foo")
# Non-numeric "errno"
w = OSError('bar', 'foo')
self.assertEqual(w.errno, 'bar')
self.assertEqual(w.winerror, None)
self.assertEqual(w.strerror, 'foo')
self.assertEqual(w.filename, None)
def testAttributes(self):
# test that exception attributes are happy
exceptionList = [
(BaseException, (), {'args' : ()}),
(BaseException, (1, ), {'args' : (1,)}),
(BaseException, ('foo',),
{'args' : ('foo',)}),
(BaseException, ('foo', 1),
{'args' : ('foo', 1)}),
(SystemExit, ('foo',),
{'args' : ('foo',), 'code' : 'foo'}),
(OSError, ('foo',),
{'args' : ('foo',), 'filename' : None,
'errno' : None, 'strerror' : None}),
(OSError, ('foo', 'bar'),
{'args' : ('foo', 'bar'), 'filename' : None,
'errno' : 'foo', 'strerror' : 'bar'}),
(OSError, ('foo', 'bar', 'baz'),
{'args' : ('foo', 'bar'), 'filename' : 'baz',
'errno' : 'foo', 'strerror' : 'bar'}),
(OSError, ('foo', 'bar', 'baz', None, 'quux'),
{'args' : ('foo', 'bar'), 'filename' : 'baz', 'filename2': 'quux'}),
(OSError, ('errnoStr', 'strErrorStr', 'filenameStr'),
{'args' : ('errnoStr', 'strErrorStr'),
'strerror' : 'strErrorStr', 'errno' : 'errnoStr',
'filename' : 'filenameStr'}),
(OSError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'), 'errno' : 1,
'strerror' : 'strErrorStr', 'filename' : 'filenameStr'}),
(SyntaxError, (), {'msg' : None, 'text' : None,
'filename' : None, 'lineno' : None, 'offset' : None,
'print_file_and_line' : None}),
(SyntaxError, ('msgStr',),
{'args' : ('msgStr',), 'text' : None,
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(SyntaxError, ('msgStr', ('filenameStr', 'linenoStr', 'offsetStr',
'textStr')),
{'offset' : 'offsetStr', 'text' : 'textStr',
'args' : ('msgStr', ('filenameStr', 'linenoStr',
'offsetStr', 'textStr')),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : 'filenameStr', 'lineno' : 'linenoStr'}),
(SyntaxError, ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
{'text' : None,
'args' : ('msgStr', 'filenameStr', 'linenoStr', 'offsetStr',
'textStr', 'print_file_and_lineStr'),
'print_file_and_line' : None, 'msg' : 'msgStr',
'filename' : None, 'lineno' : None, 'offset' : None}),
(UnicodeError, (), {'args' : (),}),
(UnicodeEncodeError, ('ascii', 'a', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', 'a', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : 'a',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
{'args' : ('ascii', bytearray(b'\xff'), 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeDecodeError, ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
{'args' : ('ascii', b'\xff', 0, 1,
'ordinal not in range'),
'encoding' : 'ascii', 'object' : b'\xff',
'start' : 0, 'reason' : 'ordinal not in range'}),
(UnicodeTranslateError, ("\u3042", 0, 1, "ouch"),
{'args' : ('\u3042', 0, 1, 'ouch'),
'object' : '\u3042', 'reason' : 'ouch',
'start' : 0, 'end' : 1}),
(NaiveException, ('foo',),
{'args': ('foo',), 'x': 'foo'}),
(SlottedNaiveException, ('foo',),
{'args': ('foo',), 'x': 'foo'}),
]
try:
# More tests are in test_WindowsError
exceptionList.append(
(WindowsError, (1, 'strErrorStr', 'filenameStr'),
{'args' : (1, 'strErrorStr'),
'strerror' : 'strErrorStr', 'winerror' : None,
'errno' : 1, 'filename' : 'filenameStr'})
)
except NameError:
pass
for exc, args, expected in exceptionList:
try:
e = exc(*args)
except:
print("\nexc=%r, args=%r" % (exc, args), file=sys.stderr)
raise
else:
# Verify module name
if not type(e).__name__.endswith('NaiveException'):
self.assertEqual(type(e).__module__, 'builtins')
# Verify no ref leaks in Exc_str()
s = str(e)
for checkArgName in expected:
value = getattr(e, checkArgName)
self.assertEqual(repr(value),
repr(expected[checkArgName]),
'%r.%s == %r, expected %r' % (
e, checkArgName,
value, expected[checkArgName]))
# test for pickling support
for p in [pickle]:
for protocol in range(p.HIGHEST_PROTOCOL + 1):
s = p.dumps(e, protocol)
new = p.loads(s)
for checkArgName in expected:
got = repr(getattr(new, checkArgName))
want = repr(expected[checkArgName])
self.assertEqual(got, want,
'pickled "%r", attribute "%s' %
(e, checkArgName))
def testWithTraceback(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = BaseException().with_traceback(tb)
self.assertIsInstance(e, BaseException)
self.assertEqual(e.__traceback__, tb)
e = IndexError(5).with_traceback(tb)
self.assertIsInstance(e, IndexError)
self.assertEqual(e.__traceback__, tb)
class MyException(Exception):
pass
e = MyException().with_traceback(tb)
self.assertIsInstance(e, MyException)
self.assertEqual(e.__traceback__, tb)
def testInvalidTraceback(self):
try:
Exception().__traceback__ = 5
except TypeError as e:
self.assertIn("__traceback__ must be a traceback", str(e))
else:
self.fail("No exception raised")
def testInvalidAttrs(self):
self.assertRaises(TypeError, setattr, Exception(), '__cause__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__cause__')
self.assertRaises(TypeError, setattr, Exception(), '__context__', 1)
self.assertRaises(TypeError, delattr, Exception(), '__context__')
def testNoneClearsTracebackAttr(self):
try:
raise IndexError(4)
except:
tb = sys.exc_info()[2]
e = Exception()
e.__traceback__ = tb
e.__traceback__ = None
self.assertEqual(e.__traceback__, None)
def testChainingAttrs(self):
e = Exception()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
e = TypeError()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
class MyException(OSError):
pass
e = MyException()
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
def testChainingDescriptors(self):
try:
raise Exception()
except Exception as exc:
e = exc
self.assertIsNone(e.__context__)
self.assertIsNone(e.__cause__)
self.assertFalse(e.__suppress_context__)
e.__context__ = NameError()
e.__cause__ = None
self.assertIsInstance(e.__context__, NameError)
self.assertIsNone(e.__cause__)
self.assertTrue(e.__suppress_context__)
e.__suppress_context__ = False
self.assertFalse(e.__suppress_context__)
def testKeywordArgs(self):
# test that builtin exception don't take keyword args,
# but user-defined subclasses can if they want
self.assertRaises(TypeError, BaseException, a=1)
class DerivedException(BaseException):
def __init__(self, fancy_arg):
BaseException.__init__(self)
self.fancy_arg = fancy_arg
x = DerivedException(fancy_arg=42)
self.assertEqual(x.fancy_arg, 42)
@no_tracing
def testInfiniteRecursion(self):
def f():
return f()
self.assertRaises(RuntimeError, f)
def g():
try:
return g()
except ValueError:
return -1
self.assertRaises(RuntimeError, g)
def test_str(self):
# Make sure both instances and classes have a str representation.
self.assertTrue(str(Exception))
self.assertTrue(str(Exception('a')))
self.assertTrue(str(Exception('a', 'b')))
def testExceptionCleanupNames(self):
# Make sure the local variable bound to the exception instance by
# an "except" statement is only visible inside the except block.
try:
raise Exception()
except Exception as e:
self.assertTrue(e)
del e
self.assertNotIn('e', locals())
def testExceptionCleanupState(self):
# Make sure exception state is cleaned up as soon as the except
# block is left. See #2507
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def inner_raising_func():
# Create some references in exception value and traceback
local_ref = obj
raise MyException(obj)
# Qualified "except" with "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException as e:
pass
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# Qualified "except" without "as"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
pass
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# Bare "except"
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except:
pass
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# "except" with premature block leave
obj = MyObj()
wr = weakref.ref(obj)
for i in [0]:
try:
inner_raising_func()
except:
break
obj = None
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# "except" block raising another exception
obj = MyObj()
wr = weakref.ref(obj)
try:
try:
inner_raising_func()
except:
raise KeyError
except KeyError as e:
# We want to test that the except block above got rid of
# the exception raised in inner_raising_func(), but it
# also ends up in the __context__ of the KeyError, so we
# must clear the latter manually for our test to succeed.
e.__context__ = None
obj = None
obj = wr()
# guarantee no ref cycles on CPython (don't gc_collect)
if check_impl_detail(cpython=False):
gc_collect()
self.assertTrue(obj is None, "%s" % obj)
# Some complicated construct
obj = MyObj()
wr = weakref.ref(obj)
try:
inner_raising_func()
except MyException:
try:
try:
raise
finally:
raise
except MyException:
pass
obj = None
if check_impl_detail(cpython=False):
gc_collect()
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
# Inside an exception-silencing "with" block
class Context:
def __enter__(self):
return self
def __exit__ (self, exc_type, exc_value, exc_tb):
return True
obj = MyObj()
wr = weakref.ref(obj)
with Context():
inner_raising_func()
obj = None
if check_impl_detail(cpython=False):
gc_collect()
obj = wr()
self.assertTrue(obj is None, "%s" % obj)
def test_exception_target_in_nested_scope(self):
# issue 4617: This used to raise a SyntaxError
# "can not delete variable 'e' referenced in nested scope"
def print_error():
e
try:
something
except Exception as e:
print_error()
# implicit "del e" here
def test_generator_leaking(self):
# Test that generator exception state doesn't leak into the calling
# frame
def yield_raise():
try:
raise KeyError("caught")
except KeyError:
yield sys.exc_info()[0]
yield sys.exc_info()[0]
yield sys.exc_info()[0]
g = yield_raise()
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], None)
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], None)
self.assertEqual(next(g), None)
# Same test, but inside an exception handler
try:
raise TypeError("foo")
except TypeError:
g = yield_raise()
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(next(g), KeyError)
self.assertEqual(sys.exc_info()[0], TypeError)
self.assertEqual(next(g), TypeError)
del g
self.assertEqual(sys.exc_info()[0], TypeError)
def test_generator_leaking2(self):
# See issue 12475.
def g():
yield
try:
raise RuntimeError
except RuntimeError:
it = g()
next(it)
try:
next(it)
except StopIteration:
pass
self.assertEqual(sys.exc_info(), (None, None, None))
def test_generator_doesnt_retain_old_exc(self):
def g():
self.assertIsInstance(sys.exc_info()[1], RuntimeError)
yield
self.assertEqual(sys.exc_info(), (None, None, None))
it = g()
try:
raise RuntimeError
except RuntimeError:
next(it)
self.assertRaises(StopIteration, next, it)
def test_generator_finalizing_and_exc_info(self):
# See #7173
def simple_gen():
yield 1
def run_gen():
gen = simple_gen()
try:
raise RuntimeError
except RuntimeError:
return next(gen)
run_gen()
gc_collect()
self.assertEqual(sys.exc_info(), (None, None, None))
def _check_generator_cleanup_exc_state(self, testfunc):
# Issue #12791: exception state is cleaned up as soon as a generator
# is closed (reference cycles are broken).
class MyException(Exception):
def __init__(self, obj):
self.obj = obj
class MyObj:
pass
def raising_gen():
try:
raise MyException(obj)
except MyException:
yield
obj = MyObj()
wr = weakref.ref(obj)
g = raising_gen()
next(g)
testfunc(g)
g = obj = None
obj = wr()
self.assertIs(obj, None)
def test_generator_throw_cleanup_exc_state(self):
def do_throw(g):
try:
g.throw(RuntimeError())
except RuntimeError:
pass
self._check_generator_cleanup_exc_state(do_throw)
def test_generator_close_cleanup_exc_state(self):
def do_close(g):
g.close()
self._check_generator_cleanup_exc_state(do_close)
def test_generator_del_cleanup_exc_state(self):
def do_del(g):
g = None
self._check_generator_cleanup_exc_state(do_del)
def test_generator_next_cleanup_exc_state(self):
def do_next(g):
try:
next(g)
except StopIteration:
pass
else:
self.fail("should have raised StopIteration")
self._check_generator_cleanup_exc_state(do_next)
def test_generator_send_cleanup_exc_state(self):
def do_send(g):
try:
g.send(None)
except StopIteration:
pass
else:
self.fail("should have raised StopIteration")
self._check_generator_cleanup_exc_state(do_send)
def test_3114(self):
# Bug #3114: in its destructor, MyObject retrieves a pointer to
# obsolete and/or deallocated objects.
class MyObject:
def __del__(self):
nonlocal e
e = sys.exc_info()
e = ()
try:
raise Exception(MyObject())
except:
pass
self.assertEqual(e, (None, None, None))
def test_unicode_change_attributes(self):
# See issue 7309. This was a crasher.
u = UnicodeEncodeError('baz', 'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't encode character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't encode characters in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't encode characters in position 1000-4: 965230951443685724997")
u = UnicodeDecodeError('baz', b'xxxxx', 1, 5, 'foo')
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "'baz' codec can't decode byte 0x78 in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "'baz' codec can't decode bytes in position 1-4: 965230951443685724997")
u.encoding = 4000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "'4000' codec can't decode bytes in position 1000-4: 965230951443685724997")
u = UnicodeTranslateError('xxxx', 1, 5, 'foo')
self.assertEqual(str(u), "can't translate characters in position 1-4: foo")
u.end = 2
self.assertEqual(str(u), "can't translate character '\\x78' in position 1: foo")
u.end = 5
u.reason = 0x345345345345345345
self.assertEqual(str(u), "can't translate characters in position 1-4: 965230951443685724997")
u.start = 1000
self.assertEqual(str(u), "can't translate characters in position 1000-4: 965230951443685724997")
def test_unicode_errors_no_object(self):
# See issue #21134.
klasses = UnicodeEncodeError, UnicodeDecodeError, UnicodeTranslateError
for klass in klasses:
self.assertEqual(str(klass.__new__(klass)), "")
@no_tracing
def test_badisinstance(self):
# Bug #2542: if issubclass(e, MyException) raises an exception,
# it should be ignored
class Meta(type):
def __subclasscheck__(cls, subclass):
raise ValueError()
class MyException(Exception, metaclass=Meta):
pass
with captured_output("stderr") as stderr:
try:
raise KeyError()
except MyException as e:
self.fail("exception should not be a MyException")
except KeyError:
pass
except:
self.fail("Should have raised KeyError")
else:
self.fail("Should have raised KeyError")
def g():
try:
return g()
except RuntimeError:
return sys.exc_info()
e, v, tb = g()
self.assertTrue(isinstance(v, RuntimeError), type(v))
self.assertIn("maximum recursion depth exceeded", str(v))
@cpython_only
def test_MemoryError(self):
# PyErr_NoMemory always raises the same exception instance.
# Check that the traceback is not doubled.
import traceback
from _testcapi import raise_memoryerror
def raiseMemError():
try:
raise_memoryerror()
except MemoryError as e:
tb = e.__traceback__
else:
self.fail("Should have raises a MemoryError")
return traceback.format_tb(tb)
tb1 = raiseMemError()
tb2 = raiseMemError()
self.assertEqual(tb1, tb2)
@cpython_only
def test_exception_with_doc(self):
import _testcapi
doc2 = "This is a test docstring."
doc4 = "This is another test docstring."
self.assertRaises(SystemError, _testcapi.make_exception_with_doc,
"error1")
# test basic usage of PyErr_NewException
error1 = _testcapi.make_exception_with_doc("_testcapi.error1")
self.assertIs(type(error1), type)
self.assertTrue(issubclass(error1, Exception))
self.assertIsNone(error1.__doc__)
# test with given docstring
error2 = _testcapi.make_exception_with_doc("_testcapi.error2", doc2)
self.assertEqual(error2.__doc__, doc2)
# test with explicit base (without docstring)
error3 = _testcapi.make_exception_with_doc("_testcapi.error3",
base=error2)
self.assertTrue(issubclass(error3, error2))
# test with explicit base tuple
class C(object):
pass
error4 = _testcapi.make_exception_with_doc("_testcapi.error4", doc4,
(error3, C))
self.assertTrue(issubclass(error4, error3))
self.assertTrue(issubclass(error4, C))
self.assertEqual(error4.__doc__, doc4)
# test with explicit dictionary
error5 = _testcapi.make_exception_with_doc("_testcapi.error5", "",
error4, {'a': 1})
self.assertTrue(issubclass(error5, error4))
self.assertEqual(error5.a, 1)
self.assertEqual(error5.__doc__, "")
@cpython_only
def test_memory_error_cleanup(self):
# Issue #5437: preallocated MemoryError instances should not keep
# traceback objects alive.
from _testcapi import raise_memoryerror
class C:
pass
wr = None
def inner():
nonlocal wr
c = C()
wr = weakref.ref(c)
raise_memoryerror()
# We cannot use assertRaises since it manually deletes the traceback
try:
inner()
except MemoryError as e:
self.assertNotEqual(wr(), None)
else:
self.fail("MemoryError not raised")
self.assertEqual(wr(), None)
@no_tracing
def test_recursion_error_cleanup(self):
# Same test as above, but with "recursion exceeded" errors
class C:
pass
wr = None
def inner():
nonlocal wr
c = C()
wr = weakref.ref(c)
inner()
# We cannot use assertRaises since it manually deletes the traceback
try:
inner()
except RuntimeError as e:
self.assertNotEqual(wr(), None)
else:
self.fail("RuntimeError not raised")
self.assertEqual(wr(), None)
def test_errno_ENOTDIR(self):
# Issue #12802: "not a directory" errors are ENOTDIR even on Windows
with self.assertRaises(OSError) as cm:
os.listdir(__file__)
self.assertEqual(cm.exception.errno, errno.ENOTDIR, cm.exception)
class ImportErrorTests(unittest.TestCase):
def test_attributes(self):
# Setting 'name' and 'path' should not be a problem.
exc = ImportError('test')
self.assertIsNone(exc.name)
self.assertIsNone(exc.path)
exc = ImportError('test', name='somemodule')
self.assertEqual(exc.name, 'somemodule')
self.assertIsNone(exc.path)
exc = ImportError('test', path='somepath')
self.assertEqual(exc.path, 'somepath')
self.assertIsNone(exc.name)
exc = ImportError('test', path='somepath', name='somename')
self.assertEqual(exc.name, 'somename')
self.assertEqual(exc.path, 'somepath')
def test_non_str_argument(self):
# Issue #15778
with check_warnings(('', BytesWarning), quiet=True):
arg = b'abc'
exc = ImportError(arg)
self.assertEqual(str(arg), str(exc))
if __name__ == '__main__':
unittest.main()
|
bayazee/Zereshk | refs/heads/master | zereshk/core/zclient.py | 1 | import zmq
import sys
port = 7766
context = zmq.Context()
print "Connecting to server..."
socket = context.socket(zmq.REQ)
# socket.connect("tcp://192.168.1.12:%s" % port)
socket.connect("tcp://localhost:%s" % port)
if len(sys.argv) > 1:
link = sys.argv[1]
print "Sending link ", link, "..."
socket.send_json({'link': link})
# Get the reply.
message = socket.recv()
print "Received reply [", message, "]"
else:
while True:
link = raw_input('zdl> ')
print "Sending link ", link, "..."
socket.send_json({'link': link})
# Get the reply.
message = socket.recv()
print "Received reply [", message, "]"
|
PriceChild/ansible | refs/heads/devel | lib/ansible/modules/cloud/openstack/os_ironic_node.py | 49 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2015, Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_ironic_node
short_description: Activate/Deactivate Bare Metal Resources from OpenStack
author: "Monty Taylor (@emonty)"
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Deploy to nodes controlled by Ironic.
options:
state:
description:
- Indicates desired state of the resource
choices: ['present', 'absent']
default: present
deploy:
description:
- Indicates if the resource should be deployed. Allows for deployment
logic to be disengaged and control of the node power or maintenance
state to be changed.
choices: ['true', 'false']
default: true
uuid:
description:
- globally unique identifier (UUID) to be given to the resource.
required: false
default: None
ironic_url:
description:
- If noauth mode is utilized, this is required to be set to the
endpoint URL for the Ironic API. Use with "auth" and "auth_type"
settings set to None.
required: false
default: None
config_drive:
description:
- A configdrive file or HTTP(S) URL that will be passed along to the
node.
required: false
default: None
instance_info:
description:
- Definition of the instance information which is used to deploy
the node. This information is only required when an instance is
set to present.
suboptions:
image_source:
description:
- An HTTP(S) URL where the image can be retrieved from.
image_checksum:
description:
- The checksum of image_source.
image_disk_format:
description:
- The type of image that has been requested to be deployed.
power:
description:
- A setting to allow power state to be asserted allowing nodes
that are not yet deployed to be powered on, and nodes that
are deployed to be powered off.
choices: ['present', 'absent']
default: present
maintenance:
description:
- A setting to allow the direct control if a node is in
maintenance mode.
required: false
default: false
maintenance_reason:
description:
- A string expression regarding the reason a node is in a
maintenance mode.
required: false
default: None
wait:
description:
- A boolean value instructing the module to wait for node
activation or deactivation to complete before returning.
required: false
default: False
version_added: "2.1"
timeout:
description:
- An integer value representing the number of seconds to
wait for the node activation or deactivation to complete.
version_added: "2.1"
availability_zone:
description:
        - Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
# Activate a node by booting an image with a configdrive attached
os_ironic_node:
cloud: "openstack"
uuid: "d44666e1-35b3-4f6b-acb0-88ab7052da69"
state: present
power: present
deploy: True
maintenance: False
config_drive: "http://192.168.1.1/host-configdrive.iso"
instance_info:
image_source: "http://192.168.1.1/deploy_image.img"
image_checksum: "356a6b55ecc511a20c33c946c4e678af"
image_disk_format: "qcow"
delegate_to: localhost
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _choose_id_value(module):
if module.params['uuid']:
return module.params['uuid']
if module.params['name']:
return module.params['name']
return None
# TODO(TheJulia): Change this over to use the machine patch method
# in shade once it is available.
def _prepare_instance_info_patch(instance_info):
patch = []
patch.append({
'op': 'replace',
'path': '/instance_info',
'value': instance_info
})
return patch
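# A sketch of the patch this helper yields (values illustrative only):
# _prepare_instance_info_patch({'image_source': 'http://host/deploy.img'})
# -> [{'op': 'replace', 'path': '/instance_info',
#      'value': {'image_source': 'http://host/deploy.img'}}]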
def _is_true(value):
true_values = [True, 'yes', 'Yes', 'True', 'true', 'present', 'on']
if value in true_values:
return True
return False
def _is_false(value):
false_values = [False, None, 'no', 'No', 'False', 'false', 'absent', 'off']
if value in false_values:
return True
return False
def _check_set_maintenance(module, cloud, node):
if _is_true(module.params['maintenance']):
if _is_false(node['maintenance']):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node has been set into "
"maintenance mode")
else:
# User has requested maintenance state, node is already in the
# desired state, checking to see if the reason has changed.
if (str(node['maintenance_reason']) not in
str(module.params['maintenance_reason'])):
cloud.set_machine_maintenance_state(
node['uuid'],
True,
reason=module.params['maintenance_reason'])
module.exit_json(changed=True, msg="Node maintenance reason "
"updated, cannot take any "
"additional action.")
elif _is_false(module.params['maintenance']):
if node['maintenance'] is True:
cloud.remove_machine_from_maintenance(node['uuid'])
return True
else:
module.fail_json(msg="maintenance parameter was set but a valid "
"the value was not recognized.")
return False
def _check_set_power_state(module, cloud, node):
if 'power on' in str(node['power_state']):
if _is_false(module.params['power']):
# User has requested the node be powered off.
cloud.set_machine_power_off(node['uuid'])
module.exit_json(changed=True, msg="Power requested off")
if 'power off' in str(node['power_state']):
if (_is_false(module.params['power']) and
_is_false(module.params['state'])):
return False
        if (_is_false(module.params['power']) and
                _is_true(module.params['state'])):
            # Deployment was requested while the user is also asking for
            # the node to stay powered off; surface this conflict.
            module.exit_json(
                changed=False,
                msg="Power for node is %s, node must be reactivated "
                    "OR set to state absent" % node['power_state']
            )
# In the event the power has been toggled on and
# deployment has been requested, we need to skip this
# step.
if (_is_true(module.params['power']) and
_is_false(module.params['deploy'])):
            # Power the node back on; it is off and no deployment was
            # requested.
cloud.set_machine_power_on(node['uuid'])
return True
# Default False if no action has been taken.
return False
def main():
argument_spec = openstack_full_argument_spec(
uuid=dict(required=False),
name=dict(required=False),
instance_info=dict(type='dict', required=False),
config_drive=dict(required=False),
ironic_url=dict(required=False),
state=dict(required=False, default='present'),
maintenance=dict(required=False),
maintenance_reason=dict(required=False),
power=dict(required=False, default='present'),
deploy=dict(required=False, default=True),
wait=dict(type='bool', required=False, default=False),
timeout=dict(required=False, type='int', default=1800),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['wait'] and
StrictVersion(shade.__version__) < StrictVersion('1.4.0')):
module.fail_json(msg="To utilize wait, the installed version of"
"the shade library MUST be >=1.4.0")
if (module.params['auth_type'] in [None, 'None'] and
module.params['ironic_url'] is None):
module.fail_json(msg="Authentication appears disabled, Please "
"define an ironic_url parameter")
if (module.params['ironic_url'] and
module.params['auth_type'] in [None, 'None']):
module.params['auth'] = dict(
endpoint=module.params['ironic_url']
)
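        # With no auth_type given, shade is pointed straight at the
        # standalone ironic endpoint above (noauth mode).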
node_id = _choose_id_value(module)
if not node_id:
module.fail_json(msg="A uuid or name value must be defined "
"to use this module.")
try:
cloud = shade.operator_cloud(**module.params)
node = cloud.get_machine(node_id)
if node is None:
module.fail_json(msg="node not found")
uuid = node['uuid']
instance_info = module.params['instance_info']
changed = False
wait = module.params['wait']
timeout = module.params['timeout']
        # User has requested the desired state to be maintenance.
        if module.params['state'] == 'maintenance':
module.params['maintenance'] = True
if node['provision_state'] in [
'cleaning',
'deleting',
'wait call-back']:
module.fail_json(msg="Node is in %s state, cannot act upon the "
"request as the node is in a transition "
"state" % node['provision_state'])
# TODO(TheJulia) This is in-development code, that requires
# code in the shade library that is still in development.
if _check_set_maintenance(module, cloud, node):
            if node['provision_state'] == 'active':
module.exit_json(changed=True,
result="Maintenance state changed")
changed = True
node = cloud.get_machine(node_id)
if _check_set_power_state(module, cloud, node):
changed = True
node = cloud.get_machine(node_id)
if _is_true(module.params['state']):
if _is_false(module.params['deploy']):
module.exit_json(
changed=changed,
result="User request has explicitly disabled "
"deployment logic"
)
            if node['provision_state'] == 'active':
module.exit_json(
changed=changed,
result="Node already in an active state."
)
if instance_info is None:
module.fail_json(
changed=changed,
msg="When setting an instance to present, "
"instance_info is a required variable.")
# TODO(TheJulia): Update instance info, however info is
# deployment specific. Perhaps consider adding rebuild
# support, although there is a known desire to remove
# rebuild support from Ironic at some point in the future.
patch = _prepare_instance_info_patch(instance_info)
cloud.set_node_instance_info(uuid, patch)
cloud.validate_node(uuid)
if not wait:
cloud.activate_node(uuid, module.params['config_drive'])
else:
cloud.activate_node(
uuid,
configdrive=module.params['config_drive'],
wait=wait,
timeout=timeout)
# TODO(TheJulia): Add more error checking..
module.exit_json(changed=changed, result="node activated")
elif _is_false(module.params['state']):
            if node['provision_state'] != 'deleted':
cloud.purge_node_instance_info(uuid)
if not wait:
cloud.deactivate_node(uuid)
else:
cloud.deactivate_node(
uuid,
wait=wait,
timeout=timeout)
module.exit_json(changed=True, result="deleted")
else:
module.exit_json(changed=False, result="node not found")
else:
module.fail_json(msg="State must be present, absent, "
"maintenance, off")
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == "__main__":
main()
|
lach76/scancode-toolkit | refs/heads/develop | tests/cluecode/data/ics/chromium-testing-gmock-test/gmock_test_utils.py | 13 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
mjfarmer/scada_py | refs/heads/master | env/lib/python2.7/site-packages/zope/interface/tests/test_interfaces.py | 27 | import unittest
class _ConformsToIObjectEvent(object):
def _makeOne(self, target=None):
if target is None:
target = object()
return self._getTargetClass()(target)
def test_class_conforms_to_IObjectEvent(self):
from zope.interface.interfaces import IObjectEvent
from zope.interface.verify import verifyClass
verifyClass(IObjectEvent, self._getTargetClass())
def test_instance_conforms_to_IObjectEvent(self):
from zope.interface.interfaces import IObjectEvent
from zope.interface.verify import verifyObject
verifyObject(IObjectEvent, self._makeOne())
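# Note: verifyClass checks the methods an interface declares against the
# class, while verifyObject additionally checks attributes on a live
# instance; the mixins here exercise both paths.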
class _ConformsToIRegistrationEvent(_ConformsToIObjectEvent):
def test_class_conforms_to_IRegistrationEvent(self):
from zope.interface.interfaces import IRegistrationEvent
from zope.interface.verify import verifyClass
verifyClass(IRegistrationEvent, self._getTargetClass())
def test_instance_conforms_to_IRegistrationEvent(self):
from zope.interface.interfaces import IRegistrationEvent
from zope.interface.verify import verifyObject
verifyObject(IRegistrationEvent, self._makeOne())
class ObjectEventTests(unittest.TestCase, _ConformsToIObjectEvent):
def _getTargetClass(self):
from zope.interface.interfaces import ObjectEvent
return ObjectEvent
def test_ctor(self):
target = object()
event = self._makeOne(target)
self.assertTrue(event.object is target)
class RegistrationEventTests(unittest.TestCase,
_ConformsToIRegistrationEvent):
def _getTargetClass(self):
from zope.interface.interfaces import RegistrationEvent
return RegistrationEvent
def test___repr__(self):
target = object()
event = self._makeOne(target)
r = repr(event)
self.assertEqual(r.splitlines(),
['RegistrationEvent event:', repr(target)])
class RegisteredTests(unittest.TestCase,
_ConformsToIRegistrationEvent):
def _getTargetClass(self):
from zope.interface.interfaces import Registered
return Registered
def test_class_conforms_to_IRegistered(self):
from zope.interface.interfaces import IRegistered
from zope.interface.verify import verifyClass
verifyClass(IRegistered, self._getTargetClass())
def test_instance_conforms_to_IRegistered(self):
from zope.interface.interfaces import IRegistered
from zope.interface.verify import verifyObject
verifyObject(IRegistered, self._makeOne())
class UnregisteredTests(unittest.TestCase,
_ConformsToIRegistrationEvent):
def _getTargetClass(self):
from zope.interface.interfaces import Unregistered
return Unregistered
def test_class_conforms_to_IUnregistered(self):
from zope.interface.interfaces import IUnregistered
from zope.interface.verify import verifyClass
verifyClass(IUnregistered, self._getTargetClass())
def test_instance_conforms_to_IUnregistered(self):
from zope.interface.interfaces import IUnregistered
from zope.interface.verify import verifyObject
verifyObject(IUnregistered, self._makeOne())
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ObjectEventTests),
unittest.makeSuite(RegistrationEventTests),
unittest.makeSuite(RegisteredTests),
unittest.makeSuite(UnregisteredTests),
))
|
MER-GROUP/intellij-community | refs/heads/master | python/testData/inspections/PyTupleAssignmentBalanceInspection/src/test.py | 53 | a, b, c = 1, 2, 3, 4
a, b, c = foo, bar
a, b, c, d = 1, 2, 3, 4
a = 1, 2, 3, 4
a, b, c = 2 |
pombredanne/grr | refs/heads/master | executables/python_hacks/shutdown_host.py | 16 | #!/usr/bin/env python
"""Shut down windows hosts."""
import platform
tested_versions = ['xp', 'vista', '2008', '2003']
cmd = 'cmd'
args = ['/c', '%SystemRoot%\\System32\\shutdown.exe', '/s', '/f']
os_version = platform.platform().lower()
# pylint: disable=undefined-variable
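# py_args and client_utils_common are injected by the GRR client at runtime.
# A hypothetical payload: py_args = {'time_in_seconds': '30', 'reason': 'patching'}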
if 'time_in_seconds' in py_args:
args.extend(['/t', py_args['time_in_seconds']])
else:
args.extend(['/t', '20'])
if 'reason' in py_args:
args.extend(['/c', py_args['reason']])
for version in tested_versions:
if os_version.find(version) != -1:
stdout, stderr, exit_status, time_taken = client_utils_common.Execute(
cmd, args, time_limit=-1, bypass_whitelist=True)
magic_return_str = '%s, %s, %s, %s' % (stdout.encode('base64'),
stderr.encode('base64'),
exit_status,
time_taken)
break
|
ahuarte47/QGIS | refs/heads/master | tests/src/python/test_qgsreport.py | 45 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsReport
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '29/12/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsProject,
QgsLayout,
QgsReport,
QgsReportSectionLayout,
QgsReportSectionFieldGroup,
QgsVectorLayer,
QgsField,
QgsFeature,
QgsReadWriteContext,
QgsUnitTypes)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtXml import QDomDocument
start_app()
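# These tests drive QgsReport's iterator interface: beginRender() resets the
# report, and successive next() calls step through header, body and footer
# layouts in document order.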
class TestQgsReport(unittest.TestCase):
def testGettersSetters(self):
p = QgsProject()
r = QgsReport(p)
self.assertEqual(r.layoutProject(), p)
self.assertEqual(r.project(), p)
r.setHeaderEnabled(True)
self.assertTrue(r.headerEnabled())
header = QgsLayout(p)
r.setHeader(header)
self.assertEqual(r.header(), header)
r.setFooterEnabled(True)
self.assertTrue(r.footerEnabled())
footer = QgsLayout(p)
r.setFooter(footer)
self.assertEqual(r.footer(), footer)
def testchildSections(self):
p = QgsProject()
r = QgsReport(p)
self.assertEqual(r.childCount(), 0)
self.assertEqual(r.childSections(), [])
self.assertIsNone(r.childSection(-1))
self.assertIsNone(r.childSection(1))
self.assertIsNone(r.childSection(0))
# try deleting non-existent childSections
r.removeChildAt(-1)
r.removeChildAt(0)
r.removeChildAt(100)
r.removeChild(None)
# append child
child1 = QgsReportSectionLayout()
self.assertIsNone(child1.project())
r.appendChild(child1)
self.assertEqual(r.childCount(), 1)
self.assertEqual(r.childSections(), [child1])
self.assertEqual(r.childSection(0), child1)
self.assertEqual(child1.parentSection(), r)
self.assertEqual(child1.row(), 0)
self.assertEqual(child1.project(), p)
child2 = QgsReportSectionLayout()
r.appendChild(child2)
self.assertEqual(r.childCount(), 2)
self.assertEqual(r.childSections(), [child1, child2])
self.assertEqual(r.childSection(1), child2)
self.assertEqual(child2.parentSection(), r)
self.assertEqual(child2.row(), 1)
def testInsertChild(self):
p = QgsProject()
r = QgsReport(p)
child1 = QgsReportSectionLayout()
r.insertChild(11, child1)
self.assertEqual(r.childCount(), 1)
self.assertEqual(r.childSections(), [child1])
self.assertEqual(child1.parentSection(), r)
self.assertEqual(child1.row(), 0)
child2 = QgsReportSectionLayout()
r.insertChild(-1, child2)
self.assertEqual(r.childCount(), 2)
self.assertEqual(r.childSections(), [child2, child1])
self.assertEqual(child2.parentSection(), r)
self.assertEqual(child2.row(), 0)
self.assertEqual(child1.row(), 1)
def testRemoveChild(self):
p = QgsProject()
r = QgsReport(p)
child1 = QgsReportSectionLayout()
r.appendChild(child1)
child2 = QgsReportSectionLayout()
r.appendChild(child2)
r.removeChildAt(-1)
r.removeChildAt(100)
r.removeChild(None)
self.assertEqual(r.childCount(), 2)
self.assertEqual(r.childSections(), [child1, child2])
r.removeChildAt(1)
self.assertEqual(r.childCount(), 1)
self.assertEqual(r.childSections(), [child1])
r.removeChild(child1)
self.assertEqual(r.childCount(), 0)
self.assertEqual(r.childSections(), [])
def testClone(self):
p = QgsProject()
r = QgsReport(p)
child1 = QgsReportSectionLayout()
child1.setHeaderEnabled(True)
r.appendChild(child1)
child2 = QgsReportSectionLayout()
child2.setFooterEnabled(True)
r.appendChild(child2)
cloned = r.clone()
self.assertEqual(cloned.childCount(), 2)
self.assertTrue(cloned.childSection(0).headerEnabled())
self.assertFalse(cloned.childSection(0).footerEnabled())
self.assertEqual(cloned.childSection(0).parentSection(), cloned)
self.assertFalse(cloned.childSection(1).headerEnabled())
self.assertTrue(cloned.childSection(1).footerEnabled())
self.assertEqual(cloned.childSection(1).parentSection(), cloned)
def testReportSectionLayout(self):
r = QgsReportSectionLayout()
p = QgsProject()
body = QgsLayout(p)
r.setBody(body)
self.assertEqual(r.body(), body)
def testIteration(self):
p = QgsProject()
r = QgsReport(p)
# empty report
self.assertTrue(r.beginRender())
self.assertFalse(r.next())
# add a header
r.setHeaderEnabled(True)
report_header = QgsLayout(p)
r.setHeader(report_header)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertFalse(r.next())
# add a footer
r.setFooterEnabled(True)
report_footer = QgsLayout(p)
r.setFooter(report_footer)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_footer)
self.assertFalse(r.next())
# add a child
child1 = QgsReportSectionLayout()
child1_body = QgsLayout(p)
child1.setBody(child1_body)
r.appendChild(child1)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_footer)
self.assertFalse(r.next())
# header and footer on child
child1_header = QgsLayout(p)
child1.setHeader(child1_header)
child1.setHeaderEnabled(True)
child1_footer = QgsLayout(p)
child1.setFooter(child1_footer)
child1.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_footer)
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_footer)
self.assertFalse(r.next())
# add another child
child2 = QgsReportSectionLayout()
child2_header = QgsLayout(p)
child2.setHeader(child2_header)
child2.setHeaderEnabled(True)
child2_footer = QgsLayout(p)
child2.setFooter(child2_footer)
child2.setFooterEnabled(True)
r.appendChild(child2)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_footer)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_footer)
self.assertFalse(r.next())
# add a child to child2
child2a = QgsReportSectionLayout()
child2a_header = QgsLayout(p)
child2a.setHeader(child2a_header)
child2a.setHeaderEnabled(True)
child2a_footer = QgsLayout(p)
child2a.setFooter(child2a_footer)
child2a.setFooterEnabled(True)
child2.appendChild(child2a)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0001.png')
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_header)
self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0002.png')
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.filePath('/tmp/myreport', '.png'), '/tmp/myreport_0003.png')
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_footer)
self.assertEqual(r.filePath('/tmp/myreport', 'jpg'), '/tmp/myreport_0004.jpg')
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0005.png')
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2a_header)
self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0006.png')
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2a_footer)
self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0007.png')
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0008.png')
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_footer)
self.assertEqual(r.filePath('/tmp/myreport', 'png'), '/tmp/myreport_0009.png')
self.assertFalse(r.next())
def testFieldGroup(self):
# create a layer
ptLayer = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)&field=town:string(20)", "points", "memory")
attributes = [
['Australia', 'QLD', 'Brisbane'],
['Australia', 'QLD', 'Emerald'],
['NZ', 'state1', 'town1'],
['Australia', 'VIC', 'Melbourne'],
['NZ', 'state1', 'town2'],
['Australia', 'QLD', 'Beerburrum'],
['Australia', 'VIC', 'Geelong'],
['NZ', 'state2', 'town2'],
['PNG', 'state1', 'town1'],
['Australia', 'NSW', 'Sydney']
]
pr = ptLayer.dataProvider()
for a in attributes:
f = QgsFeature()
f.initAttributes(3)
f.setAttribute(0, a[0])
f.setAttribute(1, a[1])
f.setAttribute(2, a[2])
self.assertTrue(pr.addFeature(f))
p = QgsProject()
r = QgsReport(p)
# add a child
child1 = QgsReportSectionFieldGroup()
child1_body = QgsLayout(p)
child1.setLayer(ptLayer)
child1.setBody(child1_body)
child1.setBodyEnabled(True)
child1.setField('country')
r.appendChild(child1)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# another group
# remove body from child1
child1.setBodyEnabled(False)
child2 = QgsReportSectionFieldGroup()
child2_body = QgsLayout(p)
child2.setLayer(ptLayer)
child2.setBody(child2_body)
child2.setBodyEnabled(True)
child2.setField('state')
child1.appendChild(child2)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# another group
# remove body from child1
child2.setBodyEnabled(False)
child3 = QgsReportSectionFieldGroup()
child3_body = QgsLayout(p)
child3.setLayer(ptLayer)
child3.setBody(child3_body)
child3.setBodyEnabled(True)
child3.setField('town')
child3.setSortAscending(False)
child2.appendChild(child3)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# add headers/footers
child3_header = QgsLayout(p)
child3.setHeader(child3_header)
child3.setHeaderEnabled(True)
child3_footer = QgsLayout(p)
child3.setFooter(child3_footer)
child3.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertFalse(r.next())
# header/footer for section2
child2_header = QgsLayout(p)
child2.setHeader(child2_header)
child2.setHeaderEnabled(True)
child2_footer = QgsLayout(p)
child2.setFooter(child2_footer)
child2.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertFalse(r.next())
# child 1 and report header/footer
child1_header = QgsLayout(p)
child1.setHeader(child1_header)
child1.setHeaderEnabled(True)
child1_footer = QgsLayout(p)
child1.setFooter(child1_footer)
child1.setFooterEnabled(True)
report_header = QgsLayout(p)
r.setHeader(report_header)
r.setHeaderEnabled(True)
report_footer = QgsLayout(p)
r.setFooter(report_footer)
r.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:1], ['Australia'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW', 'Sydney'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_header)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:2], ['PNG', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_footer)
self.assertEqual(r.layout().reportContext().feature().attributes()[:1], ['PNG'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), report_footer)
self.assertFalse(r.next())
def testFieldGroupSectionVisibility(self):
states = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)", "points", "memory")
p = QgsProject()
r = QgsReport(p)
# add a child
child1 = QgsReportSectionFieldGroup()
child1.setLayer(states)
child1.setField('country')
child1_header = QgsLayout(p)
child1.setHeader(child1_header)
child1.setHeaderEnabled(True)
child1_footer = QgsLayout(p)
child1.setFooter(child1_footer)
child1.setFooterEnabled(True)
r.appendChild(child1)
# check that no header was rendered when no features are found
self.assertTrue(r.beginRender())
self.assertFalse(r.next())
child1.setHeaderVisibility(QgsReportSectionFieldGroup.AlwaysInclude)
child1.setFooterVisibility(QgsReportSectionFieldGroup.AlwaysInclude)
# check that the header is included when no features are found
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_header)
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_footer)
def testFieldGroupMultiLayer(self):
# create a layer
states = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)", "points", "memory")
attributes = [
['Australia', 'QLD'],
['NZ', 'state1'],
['Australia', 'VIC'],
['NZ', 'state2'],
['PNG', 'state3'],
['Australia', 'NSW']
]
pr = states.dataProvider()
for a in attributes:
f = QgsFeature()
f.initAttributes(2)
f.setAttribute(0, a[0])
f.setAttribute(1, a[1])
self.assertTrue(pr.addFeature(f))
places = QgsVectorLayer("Point?crs=epsg:4326&field=state:string(20)&field=town:string(20)", "points", "memory")
attributes = [
['QLD', 'Brisbane'],
['QLD', 'Emerald'],
['state1', 'town1'],
['VIC', 'Melbourne'],
['state1', 'town2'],
['QLD', 'Beerburrum'],
['VIC', 'Geelong'],
['state3', 'town1']
]
pr = places.dataProvider()
for a in attributes:
f = QgsFeature()
f.initAttributes(2)
f.setAttribute(0, a[0])
f.setAttribute(1, a[1])
self.assertTrue(pr.addFeature(f))
p = QgsProject()
r = QgsReport(p)
# add a child
child1 = QgsReportSectionFieldGroup()
child1_body = QgsLayout(p)
child1.setLayer(states)
child1.setBody(child1_body)
child1.setBodyEnabled(True)
child1.setField('country')
r.appendChild(child1)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child1_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertFalse(r.next())
# another group
# remove body from child1
child1.setBodyEnabled(False)
child2 = QgsReportSectionFieldGroup()
child2_body = QgsLayout(p)
child2.setLayer(states)
child2.setBody(child2_body)
child2.setBodyEnabled(True)
child2.setField('state')
child1.appendChild(child2)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertFalse(r.next())
# another group
child3 = QgsReportSectionFieldGroup()
child3_body = QgsLayout(p)
child3.setLayer(places)
child3.setBody(child3_body)
child3.setBodyEnabled(True)
child3.setField('town')
child3.setSortAscending(False)
child2.appendChild(child3)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertFalse(r.next())
# add headers/footers
child3_header = QgsLayout(p)
child3.setHeader(child3_header)
child3.setHeaderEnabled(True)
child3_footer = QgsLayout(p)
child3.setFooter(child3_footer)
child3.setFooterEnabled(True)
self.assertTrue(r.beginRender())
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'NSW'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'QLD'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Emerald'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Brisbane'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['QLD', 'Beerburrum'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['Australia', 'VIC'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Melbourne'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['VIC', 'Geelong'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state1', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['NZ', 'state2'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child2_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['PNG', 'state3'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_header)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_body)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertTrue(r.next())
self.assertEqual(r.layout(), child3_footer)
self.assertEqual(r.layout().reportContext().feature().attributes(), ['state3', 'town1'])
self.assertFalse(r.next())
def testReadWriteXml(self):
p = QgsProject()
ptLayer = QgsVectorLayer("Point?crs=epsg:4326&field=country:string(20)&field=state:string(20)&field=town:string(20)", "points", "memory")
p.addMapLayer(ptLayer)
r = QgsReport(p)
r.setName('my report')
# add a header
r.setHeaderEnabled(True)
report_header = QgsLayout(p)
report_header.setUnits(QgsUnitTypes.LayoutInches)
r.setHeader(report_header)
# add a footer
r.setFooterEnabled(True)
report_footer = QgsLayout(p)
report_footer.setUnits(QgsUnitTypes.LayoutMeters)
r.setFooter(report_footer)
# add some subsections
child1 = QgsReportSectionLayout()
child1_body = QgsLayout(p)
child1_body.setUnits(QgsUnitTypes.LayoutPoints)
child1.setBody(child1_body)
child2 = QgsReportSectionLayout()
child2_body = QgsLayout(p)
child2_body.setUnits(QgsUnitTypes.LayoutPixels)
child2.setBody(child2_body)
child1.appendChild(child2)
child2a = QgsReportSectionFieldGroup()
child2a_body = QgsLayout(p)
child2a_body.setUnits(QgsUnitTypes.LayoutInches)
child2a.setBody(child2a_body)
child2a.setField('my field')
child2a.setLayer(ptLayer)
child1.appendChild(child2a)
r.appendChild(child1)
doc = QDomDocument("testdoc")
elem = r.writeLayoutXml(doc, QgsReadWriteContext())
r2 = QgsReport(p)
self.assertTrue(r2.readLayoutXml(elem, doc, QgsReadWriteContext()))
self.assertEqual(r2.name(), 'my report')
self.assertTrue(r2.headerEnabled())
self.assertEqual(r2.header().units(), QgsUnitTypes.LayoutInches)
self.assertTrue(r2.footerEnabled())
self.assertEqual(r2.footer().units(), QgsUnitTypes.LayoutMeters)
self.assertEqual(r2.childCount(), 1)
self.assertEqual(r2.childSection(0).body().units(), QgsUnitTypes.LayoutPoints)
self.assertEqual(r2.childSection(0).childCount(), 2)
self.assertEqual(r2.childSection(0).childSection(0).body().units(), QgsUnitTypes.LayoutPixels)
self.assertEqual(r2.childSection(0).childSection(1).body().units(), QgsUnitTypes.LayoutInches)
self.assertEqual(r2.childSection(0).childSection(1).field(), 'my field')
self.assertEqual(r2.childSection(0).childSection(1).layer(), ptLayer)
if __name__ == '__main__':
unittest.main()
|
poljeff/odoo | refs/heads/8.0 | addons/point_of_sale/controllers/__init__.py | 382 | import main
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
FCP-INDI/C-PAC | refs/heads/master | CPAC/utils/interfaces/__init__.py | 1 | from . import function
from . import masktool
from . import pc
from . import brickstat
from . import datasink
__all__ = [
'function',
'masktool',
'pc',
'brickstat',
'datasink',
] |
philipn/django-south | refs/heads/localwiki_master | south/introspection_plugins/__init__.py | 129 | # This module contains built-in introspector plugins for various common
# Django apps.
# Importing the modules below registers their introspection rules with South.
import south.introspection_plugins.geodjango
import south.introspection_plugins.django_audit_log
import south.introspection_plugins.django_tagging
import south.introspection_plugins.django_taggit
import south.introspection_plugins.django_objectpermissions
import south.introspection_plugins.annoying_autoonetoone
|
wfxiang08/django178 | refs/heads/master | tests/context_processors/tests.py | 21 | """
Tests for Django's bundled context processors.
"""
from django.test import TestCase
class RequestContextProcessorTests(TestCase):
"""
Tests for the ``django.core.context_processors.request`` processor.
"""
urls = 'context_processors.urls'
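    # The processor under test is enabled via TEMPLATE_CONTEXT_PROCESSORS
    # (e.g. 'django.core.context_processors.request') and exposes
    # {{ request }} in templates rendered with a RequestContext.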
def test_request_attributes(self):
"""
Test that the request object is available in the template and that its
attributes can't be overridden by GET and POST parameters (#3828).
"""
url = '/request_attrs/'
# We should have the request object in the template.
response = self.client.get(url)
self.assertContains(response, 'Have request')
# Test is_secure.
response = self.client.get(url)
self.assertContains(response, 'Not secure')
response = self.client.get(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
response = self.client.post(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
# Test path.
response = self.client.get(url)
self.assertContains(response, url)
response = self.client.get(url, {'path': '/blah/'})
self.assertContains(response, url)
response = self.client.post(url, {'path': '/blah/'})
self.assertContains(response, url)
|
jriehl/numba | refs/heads/master | numba/tests/test_warnings.py | 1 | from __future__ import print_function
import warnings
import numpy as np
import numba.unittest_support as unittest
from numba import jit
from numba.errors import NumbaWarning, deprecated, NumbaDeprecationWarning
from numba import errors
class TestBuiltins(unittest.TestCase):
def check_objmode_deprecation_warning(self, w):
# Object mode fall-back is slated for deprecation, check the warning
msg = ("Fall-back from the nopython compilation path to the object "
"mode compilation path has been detected")
self.assertEqual(w.category, NumbaDeprecationWarning)
self.assertIn(msg, str(w.message))
def test_type_infer_warning(self):
def add(x, y):
a = {} # noqa dead
return x + y
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
cfunc = jit(add)
cfunc(1, 2)
self.assertEqual(len(w), 3)
# Type inference failure
self.assertEqual(w[0].category, NumbaWarning)
self.assertIn('type inference', str(w[0].message))
# Object mode
self.assertEqual(w[1].category, NumbaWarning)
self.assertIn('object mode', str(w[1].message))
# check objmode deprecation warning
self.check_objmode_deprecation_warning(w[2])
def test_return_type_warning(self):
y = np.ones(4, dtype=np.float32)
def return_external_array():
return y
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
cfunc = jit(_nrt=False)(return_external_array)
cfunc()
self.assertEqual(len(w), 3)
# Legal return value failure
self.assertEqual(w[0].category, NumbaWarning)
self.assertIn('return type', str(w[0].message))
# Object mode fall-back
self.assertEqual(w[1].category, NumbaWarning)
self.assertIn('object mode without forceobj=True',
str(w[1].message))
# check objmode deprecation warning
self.check_objmode_deprecation_warning(w[2])
def test_return_type_warning_with_nrt(self):
"""
Rerun test_return_type_warning with nrt
"""
y = np.ones(4, dtype=np.float32)
def return_external_array():
return y
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
cfunc = jit(return_external_array)
cfunc()
# No more warning
self.assertEqual(len(w), 0)
def test_no_warning_with_forceobj(self):
def add(x, y):
a = [] # noqa dead
return x + y
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
cfunc = jit(add, forceobj=True)
cfunc(1, 2)
self.assertEqual(len(w), 0)
def test_loop_lift_warn(self):
def do_loop(x):
a = {} # noqa dead
for i in range(x.shape[0]):
x[i] *= 2
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
x = np.ones(4, dtype=np.float32)
cfunc = jit(do_loop)
cfunc(x)
self.assertEqual(len(w), 4)
# Type inference failure (1st pass, in npm, fall-back to objmode
# with looplift)
self.assertEqual(w[0].category, NumbaWarning)
self.assertIn('type inference', str(w[0].message))
self.assertIn('WITH looplifting', str(w[0].message))
# Type inference failure (2nd pass, objmode with lifted loops,
# loop found but still failed, fall back to objmode no looplift)
self.assertEqual(w[1].category, NumbaWarning)
self.assertIn('type inference', str(w[1].message))
self.assertIn('WITHOUT looplifting', str(w[1].message))
# States compilation outcome
self.assertEqual(w[2].category, NumbaWarning)
self.assertIn('compiled in object mode without forceobj=True',
str(w[2].message))
self.assertIn('but has lifted loops', str(w[2].message))
# check objmode deprecation warning
self.check_objmode_deprecation_warning(w[3])
def test_deprecated(self):
@deprecated('foo')
def bar():
pass
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
bar()
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, DeprecationWarning)
self.assertIn('bar', str(w[0].message))
self.assertIn('foo', str(w[0].message))
def test_warnings_fixer(self):
# For some context, see #4083
wfix = errors.WarningsFixer(errors.NumbaWarning)
with wfix.catch_warnings('foo', 10):
warnings.warn(errors.NumbaWarning('same'))
warnings.warn(errors.NumbaDeprecationWarning('same'))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
wfix.flush()
self.assertEqual(len(w), 2)
# the order of these will be backwards to the above, the
# WarningsFixer flush method sorts with a key based on str
# comparison
self.assertEqual(w[0].category, NumbaDeprecationWarning)
self.assertEqual(w[1].category, NumbaWarning)
self.assertIn('same', str(w[0].message))
self.assertIn('same', str(w[1].message))
if __name__ == '__main__':
unittest.main()
|
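The fall-back warnings exercised in the tests above disappear when the compilation mode is chosen explicitly. A hedged sketch (assumes numba is installed; the function names are illustrative):

from numba import jit

@jit(nopython=True)      # fail loudly instead of falling back to objmode
def add_typed(x, y):
    return x + y

@jit(forceobj=True)      # opt in to object mode; no fall-back warning
def add_object(x, y):
    a = {}               # untypable in nopython mode, fine here
    return x + y

print(add_typed(1, 2), add_object(1, 2))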
nitin-cherian/LifeLongLearning | refs/heads/master | Web_Development_Python/RealPython/real-python-test/env/lib/python3.5/site-packages/pip/commands/hash.py | 514 | from __future__ import absolute_import
import hashlib
import logging
import sys
from pip.basecommand import Command
from pip.status_codes import ERROR
from pip.utils import read_chunks
from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES
logger = logging.getLogger(__name__)
class HashCommand(Command):
"""
Compute a hash of a local package archive.
These can be used with --hash in a requirements file to do repeatable
installs.
"""
name = 'hash'
usage = '%prog [options] <file> ...'
summary = 'Compute hashes of package archives.'
def __init__(self, *args, **kw):
super(HashCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-a', '--algorithm',
dest='algorithm',
choices=STRONG_HASHES,
action='store',
default=FAVORITE_HASH,
help='The hash algorithm to use: one of %s' %
', '.join(STRONG_HASHES))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
if not args:
self.parser.print_usage(sys.stderr)
return ERROR
algorithm = options.algorithm
for path in args:
logger.info('%s:\n--hash=%s:%s',
path, algorithm, _hash_of_file(path, algorithm))
def _hash_of_file(path, algorithm):
"""Return the hash digest of a file."""
with open(path, 'rb') as archive:
hash = hashlib.new(algorithm)
for chunk in read_chunks(archive):
hash.update(chunk)
return hash.hexdigest()
|
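The `_hash_of_file` helper above leans on pip internals (`read_chunks`). A standalone sketch of the same chunked-hashing idea, with the chunk size as an assumption:

import hashlib

def hash_of_file(path, algorithm='sha256', chunk_size=8192):
    # Stream the file so large archives don't need to fit in memory.
    h = hashlib.new(algorithm)
    with open(path, 'rb') as archive:
        for chunk in iter(lambda: archive.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

The resulting digest is what gets pinned in a requirements file, e.g. `SomePackage==1.0 --hash=sha256:<digest>`.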
elventear/ansible | refs/heads/devel | contrib/inventory/fleet.py | 56 | #!/usr/bin/env python
"""
fleetctl base external inventory script. Automatically finds the IPs of the booted coreos instances and
returns them under the host group 'coreos'
"""
# Copyright (C) 2014 Andrew Rothstein <andrew.rothstein at gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Thanks to the vagrant.py inventory script for giving me the basic structure
# of this.
#
import sys
import subprocess
import re
import string
from optparse import OptionParser
try:
import json
except ImportError:
import simplejson as json
# Options
#------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
help="Produce a JSON consumable grouping of servers in your fleet")
parser.add_option('--host', default=None, dest="host",
help="Generate additional host specific details for given host for Ansible")
(options, args) = parser.parse_args()
#
# helper functions
#
def get_ssh_config():
configs = []
for box in list_running_boxes():
config = get_a_ssh_config(box)
configs.append(config)
return configs
#list all the running instances in the fleet
def list_running_boxes():
boxes = []
for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'):
matcher = re.search("[^\s]+[\s]+([^\s]+).+", line)
if matcher and matcher.group(1) != "IP":
boxes.append(matcher.group(1))
return boxes
def get_a_ssh_config(box_name):
config = {}
config['Host'] = box_name
config['ansible_ssh_user'] = 'core'
config['ansible_python_interpreter'] = '/opt/bin/python'
return config
# List out servers that fleet has running
#------------------------------
if options.list:
ssh_config = get_ssh_config()
hosts = { 'coreos': []}
for data in ssh_config:
hosts['coreos'].append(data['Host'])
print(json.dumps(hosts))
sys.exit(1)
# Get out the host details
#------------------------------
elif options.host:
result = {}
ssh_config = get_ssh_config()
details = filter(lambda x: (x['Host'] == options.host), ssh_config)
if len(details) > 0:
#pass through the port, in case it's non standard.
result = details[0]
print(json.dumps(result))
sys.exit(1)
# Print out help
#------------------------------
else:
parser.print_help()
sys.exit(1)
|
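For reference, the JSON contract an Ansible external inventory script like the one above must honor. A hedged sketch; the addresses are illustrative, the keys match the script:

import json

# `--list`: a JSON object mapping group names to member hosts.
print(json.dumps({'coreos': ['172.17.8.101', '172.17.8.102']}))

# `--host <machine>`: a JSON object of variables for that host.
print(json.dumps({
    'Host': '172.17.8.101',
    'ansible_ssh_user': 'core',
    'ansible_python_interpreter': '/opt/bin/python',
}))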
gsathya/bridgedb | refs/heads/master | lib/bridgedb/__init__.py | 6 | #
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: Isis Lovecruft 0xA3ADB67A2CDB8B35 <isis@torproject.org>
# please also see AUTHORS file
# :copyright: (c) 2007-2013, The Tor Project, Inc.
# (c) 2007-2013, all entities within the AUTHORS file
# :license: 3-clause BSD, see included LICENSE for information
from ._version import get_versions
from ._langs import get_langs
__version__ = get_versions()['version']
__langs__ = get_langs()
del get_versions
del get_langs
|
ecosoft-odoo/odoo | refs/heads/8.0 | addons/website_crm/__init__.py | 1350 | import controllers
|
dahlstrom-g/intellij-community | refs/heads/master | python/testData/codeInsight/mlcompletion/prev2calls/assignmentVisitorCheckAnotherPackage.py | 10 | import pandas as pd
def foo():
    pass
df = pd.read_csv(pd.compat.StringIO(fruit_price))
df['label'] = df.apply(foo, axis=1)
<caret> |
jawad6233/MT6795.kernel | refs/heads/master | alps/kernel-3.10/tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n"
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
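sctop.py is Python 2 and runs inside perf's embedded interpreter; the display pattern it uses — a background thread flushing counters every interval — looks roughly like this in plain Python 3 (the names and the simulated event loop are illustrative, not part of the script):

import threading
import time
from collections import Counter

counts = Counter()

def print_totals(interval):
    while True:
        time.sleep(interval)
        for name, n in counts.most_common():
            print('%-40s %10d' % (name, n))
        counts.clear()

threading.Thread(target=print_totals, args=(1,), daemon=True).start()
for _ in range(50):        # stand-in for raw_syscalls__sys_enter callbacks
    counts['read'] += 1
    time.sleep(0.05)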
yoer/hue | refs/heads/master | apps/oozie/src/oozie/migrations/0015_auto__add_field_dataset_advanced_start_instance__add_field_dataset_ins.py | 39 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Dataset.advanced_start_instance'
db.add_column('oozie_dataset', 'advanced_start_instance', self.gf('django.db.models.fields.CharField')(default='0', max_length=128), keep_default=False)
# Adding field 'Dataset.instance_choice'
db.add_column('oozie_dataset', 'instance_choice', self.gf('django.db.models.fields.CharField')(default='default', max_length=10), keep_default=False)
# Adding field 'Dataset.advanced_end_instance'
db.add_column('oozie_dataset', 'advanced_end_instance', self.gf('django.db.models.fields.CharField')(default='0', max_length=128, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Dataset.advanced_start_instance'
db.delete_column('oozie_dataset', 'advanced_start_instance')
# Deleting field 'Dataset.instance_choice'
db.delete_column('oozie_dataset', 'instance_choice')
# Deleting field 'Dataset.advanced_end_instance'
db.delete_column('oozie_dataset', 'advanced_end_instance')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 6, 19, 26, 33, 676504)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 3, 19, 26, 33, 676468)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
},
'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 3, 19, 26, 33, 677121)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
'oozie.decision': {
'Meta': {'object_name': 'Decision'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.end': {
'Meta': {'object_name': 'End'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fork': {
'Meta': {'object_name': 'Fork'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
'oozie.generic': {
'Meta': {'object_name': 'Generic'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
'oozie.history': {
'Meta': {'object_name': 'History'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-site.xml"}]\''}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.job': {
'Meta': {'object_name': 'Job'},
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'oozie.join': {
'Meta': {'object_name': 'Join'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
},
'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'oozie.start': {
'Meta': {'object_name': 'Start'},
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
},
'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
},
'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
|
jvdm/AutobahnPython | refs/heads/master | examples/twisted/websocket/echo_tls/client.py | 11 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
from optparse import OptionParser
from twisted.python import log
from twisted.internet import reactor, ssl
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def sendHello(self):
self.sendMessage("Hello, world!".encode('utf8'))
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
reactor.callLater(1, self.sendHello)
if __name__ == '__main__':
log.startLogging(sys.stdout)
parser = OptionParser()
parser.add_option("-u", "--url", dest="url", help="The WebSocket URL", default="wss://127.0.0.1:9000")
(options, args) = parser.parse_args()
# create a WS server factory with our protocol
##
factory = WebSocketClientFactory(options.url, debug=False)
factory.protocol = EchoClientProtocol
# SSL client context: default
##
if factory.isSecure:
contextFactory = ssl.ClientContextFactory()
else:
contextFactory = None
connectWS(factory, contextFactory)
reactor.run()
|
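A matching TLS echo server, sketched under the assumption of the same autobahn/twisted APIs the client above uses; the key and certificate paths are placeholders:

from twisted.internet import reactor, ssl
from autobahn.twisted.websocket import WebSocketServerFactory, \
                                       WebSocketServerProtocol, \
                                       listenWS

class EchoServerProtocol(WebSocketServerProtocol):
    def onMessage(self, payload, isBinary):
        self.sendMessage(payload, isBinary)  # echo the frame back

if __name__ == '__main__':
    contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key',
                                                      'keys/server.crt')
    factory = WebSocketServerFactory("wss://127.0.0.1:9000", debug=False)
    factory.protocol = EchoServerProtocol
    listenWS(factory, contextFactory)
    reactor.run()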
doctorOb/thoughtsbydrob | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/filters/__init__.py | 196 | # -*- coding: utf-8 -*-
"""
pygments.filters
~~~~~~~~~~~~~~~~
Module containing filter lookup functions and default
filters.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
def find_filter_class(filtername):
"""
Lookup a filter by name. Return None if not found.
"""
if filtername in FILTERS:
return FILTERS[filtername]
for name, cls in find_plugin_filters():
if name == filtername:
return cls
return None
def get_filter_by_name(filtername, **options):
"""
Return an instantiated filter. Options are passed to the filter
initializer if wanted. Raise a ClassNotFound if not found.
"""
cls = find_filter_class(filtername)
if cls:
return cls(**options)
else:
raise ClassNotFound('filter %r not found' % filtername)
def get_all_filters():
"""
Return a generator of all filter names.
"""
for name in FILTERS:
yield name
for name, _ in find_plugin_filters():
yield name
def _replace_special(ttype, value, regex, specialttype,
replacefunc=lambda x: x):
last = 0
for match in regex.finditer(value):
start, end = match.start(), match.end()
if start != last:
yield ttype, value[last:start]
yield specialttype, replacefunc(value[start:end])
last = end
if last != len(value):
yield ttype, value[last:]
class CodeTagFilter(Filter):
"""
Highlight special code tags in comments and docstrings.
Options accepted:
`codetags` : list of strings
A list of strings that are flagged as code tags. The default is to
highlight ``XXX``, ``TODO``, ``BUG`` and ``NOTE``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
tags = get_list_opt(options, 'codetags',
['XXX', 'TODO', 'BUG', 'NOTE'])
self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
re.escape(tag) for tag in tags if tag
]))
def filter(self, lexer, stream):
regex = self.tag_re
for ttype, value in stream:
if ttype in String.Doc or \
ttype in Comment and \
ttype not in Comment.Preproc:
for sttype, svalue in _replace_special(ttype, value, regex,
Comment.Special):
yield sttype, svalue
else:
yield ttype, value
class KeywordCaseFilter(Filter):
"""
Convert keywords to lowercase or uppercase or capitalize them, which
means first letter uppercase, rest lowercase.
This can be useful e.g. if you highlight Pascal code and want to adapt the
code to your styleguide.
Options accepted:
`case` : string
The casing to convert keywords to. Must be one of ``'lower'``,
``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case', ['lower', 'upper', 'capitalize'], 'lower')
self.convert = getattr(unicode, case)
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Keyword:
yield ttype, self.convert(value)
else:
yield ttype, value
class NameHighlightFilter(Filter):
"""
Highlight a normal Name (and Name.*) token with a different token type.
Example::
filter = NameHighlightFilter(
names=['foo', 'bar', 'baz'],
tokentype=Name.Function,
)
This would highlight the names "foo", "bar" and "baz"
as functions. `Name.Function` is the default token type.
Options accepted:
`names` : list of strings
A list of names that should be given the different token type.
There is no default.
`tokentype` : TokenType or string
A token type or a string containing a token type name that is
used for highlighting the strings in `names`. The default is
`Name.Function`.
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.names = set(get_list_opt(options, 'names', []))
tokentype = options.get('tokentype')
if tokentype:
self.tokentype = string_to_tokentype(tokentype)
else:
self.tokentype = Name.Function
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype in Name and value in self.names:
yield self.tokentype, value
else:
yield ttype, value
class ErrorToken(Exception):
pass
class RaiseOnErrorTokenFilter(Filter):
"""
Raise an exception when the lexer generates an error token.
Options accepted:
`excclass` : Exception class
The exception class to raise.
The default is `pygments.filters.ErrorToken`.
*New in Pygments 0.8.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.exception = options.get('excclass', ErrorToken)
try:
# issubclass() will raise TypeError if first argument is not a class
if not issubclass(self.exception, Exception):
raise TypeError
except TypeError:
raise OptionError('excclass option is not an exception class')
def filter(self, lexer, stream):
for ttype, value in stream:
if ttype is Error:
raise self.exception(value)
yield ttype, value
class VisibleWhitespaceFilter(Filter):
"""
Convert tabs, newlines and/or spaces to visible characters.
Options accepted:
`spaces` : string or bool
        If this is a one-character string, spaces will be replaced by this string.
If it is another true value, spaces will be replaced by ``·`` (unicode
MIDDLE DOT). If it is a false value, spaces will not be replaced. The
default is ``False``.
`tabs` : string or bool
The same as for `spaces`, but the default replacement character is ``»``
(unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
is ``False``. Note: this will not work if the `tabsize` option for the
lexer is nonzero, as tabs will already have been expanded then.
`tabsize` : int
If tabs are to be replaced by this filter (see the `tabs` option), this
is the total number of characters that a tab should be expanded to.
The default is ``8``.
`newlines` : string or bool
The same as for `spaces`, but the default replacement character is ``¶``
(unicode PILCROW SIGN). The default value is ``False``.
`wstokentype` : bool
If true, give whitespace the special `Whitespace` token type. This allows
styling the visible whitespace differently (e.g. greyed out), but it can
disrupt background colors. The default is ``True``.
*New in Pygments 0.8.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
for name, default in {'spaces': u'·', 'tabs': u'»', 'newlines': u'¶'}.items():
opt = options.get(name, False)
if isinstance(opt, basestring) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
tabsize = get_int_opt(options, 'tabsize', 8)
if self.tabs:
self.tabs += ' '*(tabsize-1)
if self.newlines:
self.newlines += '\n'
self.wstt = get_bool_opt(options, 'wstokentype', True)
def filter(self, lexer, stream):
if self.wstt:
spaces = self.spaces or ' '
tabs = self.tabs or '\t'
newlines = self.newlines or '\n'
regex = re.compile(r'\s')
def replacefunc(wschar):
if wschar == ' ':
return spaces
elif wschar == '\t':
return tabs
elif wschar == '\n':
return newlines
return wschar
for ttype, value in stream:
for sttype, svalue in _replace_special(ttype, value, regex,
Whitespace, replacefunc):
yield sttype, svalue
else:
spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
# simpler processing
for ttype, value in stream:
if spaces:
value = value.replace(' ', spaces)
if tabs:
value = value.replace('\t', tabs)
if newlines:
value = value.replace('\n', newlines)
yield ttype, value
class GobbleFilter(Filter):
"""
Gobbles source code lines (eats initial characters).
This filter drops the first ``n`` characters off every line of code. This
may be useful when the source code fed to the lexer is indented by a fixed
amount of space that isn't desired in the output.
Options accepted:
`n` : int
The number of characters to gobble.
*New in Pygments 1.2.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
self.n = get_int_opt(options, 'n', 0)
def gobble(self, value, left):
if left < len(value):
return value[left:], 0
else:
return '', left - len(value)
def filter(self, lexer, stream):
n = self.n
left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
(parts[0], left) = self.gobble(parts[0], left)
for i in range(1, len(parts)):
(parts[i], left) = self.gobble(parts[i], n)
value = '\n'.join(parts)
if value != '':
yield ttype, value
class TokenMergeFilter(Filter):
"""
Merges consecutive tokens with the same token type in the output stream of a
lexer.
*New in Pygments 1.2.*
"""
def __init__(self, **options):
Filter.__init__(self, **options)
def filter(self, lexer, stream):
current_type = None
current_value = None
for ttype, value in stream:
if ttype is current_type:
current_value += value
else:
if current_type is not None:
yield current_type, current_value
current_type = ttype
current_value = value
if current_type is not None:
yield current_type, current_value
FILTERS = {
'codetagify': CodeTagFilter,
'keywordcase': KeywordCaseFilter,
'highlight': NameHighlightFilter,
'raiseonerror': RaiseOnErrorTokenFilter,
'whitespace': VisibleWhitespaceFilter,
'gobble': GobbleFilter,
'tokenmerge': TokenMergeFilter,
}
|
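A hedged usage sketch for the registry above: `Lexer.add_filter` accepts a filter name and resolves it through `get_filter_by_name`, so the FILTERS entries can be attached by string (the lexer choice and sample code are illustrative):

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter

lexer = PythonLexer()
lexer.add_filter('keywordcase', case='upper')  # resolved via FILTERS
print(highlight('for i in range(3): pass', lexer, TerminalFormatter()))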
GitHublong/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/m2m_signals/tests.py | 150 | """
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Part, Car, SportsCar, Person
class ManyToManySignalsTest(TestCase):
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
'instance': kwargs['instance'],
'action': kwargs['action'],
'reverse': kwargs['reverse'],
'model': kwargs['model'],
}
if kwargs['pk_set']:
message['objects'] = list(
kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
)
self.m2m_changed_messages.append(message)
def setUp(self):
self.m2m_changed_messages = []
self.vw = Car.objects.create(name='VW')
self.bmw = Car.objects.create(name='BMW')
self.toyota = Car.objects.create(name='Toyota')
self.wheelset = Part.objects.create(name='Wheelset')
self.doors = Part.objects.create(name='Doors')
self.engine = Part.objects.create(name='Engine')
self.airbag = Part.objects.create(name='Airbag')
self.sunroof = Part.objects.create(name='Sunroof')
self.alice = Person.objects.create(name='Alice')
self.bob = Person.objects.create(name='Bob')
self.chuck = Person.objects.create(name='Chuck')
self.daisy = Person.objects.create(name='Daisy')
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
# Install a listener on one of the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
# Test the add, remove and clear methods on both sides of the
# many-to-many relation
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
# Now install a listener
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the BMW and Toyata some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append({
'instance': self.doors,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.doors,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# remove the engine from the self.vw and the airbag (which is not set
# but is returned)
self.vw.default_parts.remove(self.engine, self.airbag)
expected_messages.append({
'instance': self.vw,
'action': 'pre_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_remove',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.engine],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the self.vw some optional parts (second relation to same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.airbag, self.sunroof],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# add airbag to all the cars (even though the self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [self.bmw, self.toyota],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# remove airbag from the self.vw (reverse relation with custom
# related_name)
self.airbag.cars_optional.remove(self.vw)
expected_messages.append({
'instance': self.airbag,
'action': 'pre_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_remove',
'reverse': True,
'model': Car,
'objects': [self.vw],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# clear all parts of the self.vw
self.vw.default_parts.clear()
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# take all the doors off of cars
self.doors.car_set.clear()
expected_messages.append({
'instance': self.doors,
'action': 'pre_clear',
'reverse': True,
'model': Car,
})
expected_messages.append({
'instance': self.doors,
'action': 'post_clear',
'reverse': True,
'model': Car,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
expected_messages.append({
'instance': self.airbag,
'action': 'pre_clear',
'reverse': True,
'model': Car,
})
expected_messages.append({
'instance': self.airbag,
'action': 'post_clear',
'reverse': True,
'model': Car,
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# alternative ways of setting relation:
self.vw.default_parts.create(name='Windows')
p6 = Part.objects.get(name='Windows')
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [p6],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
        self.vw.default_parts = [self.wheelset, self.doors, self.engine]
expected_messages.append({
'instance': self.vw,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': self.vw,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
expected_messages.append({
'instance': self.vw,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors, self.engine, self.wheelset],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
# Check that signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
c4b = Car.objects.get(name='Bugatti')
c4.default_parts = [self.doors]
expected_messages.append({
'instance': c4,
'action': 'pre_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': c4,
'action': 'post_clear',
'reverse': False,
'model': Part,
})
expected_messages.append({
'instance': c4,
'action': 'pre_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
expected_messages.append({
'instance': c4,
'action': 'post_add',
'reverse': False,
'model': Part,
'objects': [self.doors],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append({
'instance': self.engine,
'action': 'pre_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
expected_messages.append({
'instance': self.engine,
'action': 'post_add',
'reverse': True,
'model': Car,
'objects': [c4b],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_with_self(self):
expected_messages = []
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
self.alice.friends = [self.bob, self.chuck]
expected_messages.append({
'instance': self.alice,
'action': 'pre_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'post_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
})
expected_messages.append({
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.bob, self.chuck],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.alice.fans = [self.daisy]
expected_messages.append({
'instance': self.alice,
'action': 'pre_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'post_clear',
'reverse': False,
'model': Person,
})
expected_messages.append({
'instance': self.alice,
'action': 'pre_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
})
expected_messages.append({
'instance': self.alice,
'action': 'post_add',
'reverse': False,
'model': Person,
'objects': [self.daisy],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
        self.chuck.idols = [self.alice, self.bob]
expected_messages.append({
'instance': self.chuck,
'action': 'pre_clear',
'reverse': True,
'model': Person,
})
expected_messages.append({
'instance': self.chuck,
'action': 'post_clear',
'reverse': True,
'model': Person,
})
expected_messages.append({
'instance': self.chuck,
'action': 'pre_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
})
expected_messages.append({
'instance': self.chuck,
'action': 'post_add',
'reverse': True,
'model': Person,
'objects': [self.alice, self.bob],
})
self.assertEqual(self.m2m_changed_messages, expected_messages)
|
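A minimal standalone receiver for the signal these tests exercise — a hedged sketch reusing the test models; the receiver name is illustrative:

from django.db import models

from .models import Car

def log_m2m(sender, instance, action, reverse, model, pk_set, **kwargs):
    # action is one of pre_/post_ + add/remove/clear, as asserted above
    print(action, instance, 'reverse' if reverse else 'forward', pk_set)

models.signals.m2m_changed.connect(log_m2m, sender=Car.default_parts.through)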
erkrishna9/odoo | refs/heads/master | addons/account/report/account_entries_report.py | 16 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
import openerp.addons.decimal_precision as dp
class account_entries_report(osv.osv):
_name = "account.entries.report"
_description = "Journal Items Analysis"
_auto = False
_rec_name = 'date'
_columns = {
'date_created': fields.date('Date Created', readonly=True),
'date_maturity': fields.date('Date Maturity', readonly=True),
'ref': fields.char('Reference', readonly=True),
'nbr': fields.integer('# of Items', readonly=True),
'debit': fields.float('Debit', readonly=True),
'credit': fields.float('Credit', readonly=True),
'balance': fields.float('Balance', readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'year': fields.char('Year', size=4, readonly=True),
'date': fields.date('Date', size=128, readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'amount_currency': fields.float('Amount Currency', digits_compute=dp.get_precision('Account'), readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
'period_id': fields.many2one('account.period', 'Period', readonly=True),
'account_id': fields.many2one('account.account', 'Account', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', readonly=True),
'move_state': fields.selection([('draft','Unposted'), ('posted','Posted')], 'Status', readonly=True),
'move_line_state': fields.selection([('draft','Unbalanced'), ('valid','Valid')], 'State of Move Line', readonly=True),
'reconcile_id': fields.many2one('account.move.reconcile', readonly=True),
'partner_id': fields.many2one('res.partner','Partner', readonly=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Products Quantity', digits=(16,2), readonly=True),
'user_type': fields.many2one('account.account.type', 'Account Type', readonly=True),
'type': fields.selection([
('receivable', 'Receivable'),
('payable', 'Payable'),
('cash', 'Cash'),
('view', 'View'),
('consolidation', 'Consolidation'),
('other', 'Regular'),
('closed', 'Closed'),
], 'Internal Type', readonly=True, help="This type is used to differentiate types with "\
"special effects in OpenERP: view can not have entries, consolidation are accounts that "\
"can have children accounts for multi-company consolidations, payable/receivable are for "\
"partners accounts (for debit/credit computations), closed for depreciated accounts."),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
}
_order = 'date desc'
def search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False):
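        # Translate the pseudo-filters 'current_period' / 'current_year'
        # into concrete period-id domains before delegating to the parent.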
fiscalyear_obj = self.pool.get('account.fiscalyear')
period_obj = self.pool.get('account.period')
for arg in args:
if arg[0] == 'period_id' and arg[2] == 'current_period':
current_period = period_obj.find(cr, uid, context=context)[0]
args.append(['period_id','in',[current_period]])
break
elif arg[0] == 'period_id' and arg[2] == 'current_year':
current_year = fiscalyear_obj.find(cr, uid)
ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids']
args.append(['period_id','in',ids])
for a in [['period_id','in','current_year'], ['period_id','in','current_period']]:
if a in args:
args.remove(a)
return super(account_entries_report, self).search(cr, uid, args=args, offset=offset, limit=limit, order=order,
context=context, count=count)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False,lazy=True):
if context is None:
context = {}
fiscalyear_obj = self.pool.get('account.fiscalyear')
period_obj = self.pool.get('account.period')
if context.get('period', False) == 'current_period':
current_period = period_obj.find(cr, uid, context=context)[0]
domain.append(['period_id','in',[current_period]])
elif context.get('year', False) == 'current_year':
current_year = fiscalyear_obj.find(cr, uid)
ids = fiscalyear_obj.read(cr, uid, [current_year], ['period_ids'])[0]['period_ids']
domain.append(['period_id','in',ids])
return super(account_entries_report, self).read_group(cr, uid, domain, fields, groupby, offset, limit, context, orderby,lazy)
def init(self, cr):
tools.drop_view_if_exists(cr, 'account_entries_report')
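        # Recreate the reporting view: one row per non-draft journal item,
        # joined with its move, account and period for analysis dimensions.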
cr.execute("""
create or replace view account_entries_report as (
select
l.id as id,
am.date as date,
l.date_maturity as date_maturity,
l.date_created as date_created,
am.ref as ref,
am.state as move_state,
l.state as move_line_state,
l.reconcile_id as reconcile_id,
to_char(am.date, 'YYYY') as year,
to_char(am.date, 'MM') as month,
to_char(am.date, 'YYYY-MM-DD') as day,
l.partner_id as partner_id,
l.product_id as product_id,
l.product_uom_id as product_uom_id,
am.company_id as company_id,
am.journal_id as journal_id,
p.fiscalyear_id as fiscalyear_id,
am.period_id as period_id,
l.account_id as account_id,
l.analytic_account_id as analytic_account_id,
a.type as type,
a.user_type as user_type,
1 as nbr,
l.quantity as quantity,
l.currency_id as currency_id,
l.amount_currency as amount_currency,
l.debit as debit,
l.credit as credit,
l.debit-l.credit as balance
from
account_move_line l
left join account_account a on (l.account_id = a.id)
left join account_move am on (am.id=l.move_id)
left join account_period p on (am.period_id=p.id)
where l.state != 'draft'
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
resba/gnuradio | refs/heads/master | gr-noaa/apps/usrp_rx_hrpt_nogui.py | 6 | #!/usr/bin/env python
##################################################
# Gnuradio Python Flow Graph
# Title: USRP HRPT Receiver
# Generated: Thu Oct 27 13:49:01 2011
##################################################
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import noaa
from gnuradio import uhd
from gnuradio.eng_option import eng_option
from gnuradio.gr import firdes
from optparse import OptionParser
import ConfigParser
import math, os
class usrp_rx_hrpt_nogui(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "USRP HRPT Receiver")
##################################################
# Variables
##################################################
self.sym_rate = sym_rate = 600*1109
self.sample_rate = sample_rate = 4e6
self.sps = sps = sample_rate/sym_rate
self.config_filename = config_filename = os.environ['HOME']+'/.gnuradio/config.conf'
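        # Each persisted parameter below is read from the shared GNU Radio
        # config file, falling back to a built-in default when absent.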
self._pll_alpha_config = ConfigParser.ConfigParser()
self._pll_alpha_config.read(config_filename)
try: pll_alpha = self._pll_alpha_config.getfloat('usrp_rx_hrpt', 'pll_alpha')
except: pll_alpha = 0.01
self.pll_alpha = pll_alpha
self._output_filename_config = ConfigParser.ConfigParser()
self._output_filename_config.read(config_filename)
try: output_filename = self._output_filename_config.get('usrp_rx_hrpt', 'filename')
except: output_filename = 'frames.hrpt'
self.output_filename = output_filename
self.max_clock_offset = max_clock_offset = 100e-6
self.max_carrier_offset = max_carrier_offset = 2*math.pi*100e3/sample_rate
self.hs = hs = int(sps/2.0)
self._gain_config = ConfigParser.ConfigParser()
self._gain_config.read(config_filename)
try: gain = self._gain_config.getfloat('usrp_rx_hrpt', 'gain')
except: gain = 35
self.gain = gain
self._freq_config = ConfigParser.ConfigParser()
self._freq_config.read(config_filename)
try: freq = self._freq_config.getfloat('usrp_rx_hrpt', 'freq')
except: freq = 1698e6
self.freq = freq
self._clock_alpha_config = ConfigParser.ConfigParser()
self._clock_alpha_config.read(config_filename)
try: clock_alpha = self._clock_alpha_config.getfloat('usrp_rx_hrpt', 'clock_alpha')
except: clock_alpha = 0.01
self.clock_alpha = clock_alpha
self._addr_config = ConfigParser.ConfigParser()
self._addr_config.read(config_filename)
try: addr = self._addr_config.get('usrp_rx_hrpt', 'addr')
except: addr = ""
self.addr = addr
##################################################
# Blocks
##################################################
self.uhd_usrp_source_0 = uhd.usrp_source(
device_addr=addr,
io_type=uhd.io_type.COMPLEX_FLOAT32,
num_channels=1,
)
self.uhd_usrp_source_0.set_samp_rate(sample_rate)
self.uhd_usrp_source_0.set_center_freq(freq, 0)
        self.uhd_usrp_source_0.set_gain(gain, 0)
self.pll = noaa.hrpt_pll_cf(pll_alpha, pll_alpha**2/4.0, max_carrier_offset)
self.gr_moving_average_xx_0 = gr.moving_average_ff(hs, 1.0/hs, 4000)
self.frame_sink = gr.file_sink(gr.sizeof_short*1, output_filename)
self.frame_sink.set_unbuffered(False)
self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_ff(sps/2.0, clock_alpha**2/4.0, 0.5, clock_alpha, max_clock_offset)
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
self.deframer = noaa.hrpt_deframer()
self.decoder = noaa.hrpt_decoder(True,True)
self.agc = gr.agc_cc(1e-6, 1.0, 1.0, 1.0)
##################################################
# Connections
##################################################
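        # Signal path: USRP -> AGC -> PLL -> moving average -> M&M clock
        # recovery -> binary slicer -> deframer -> decoder + frame file sink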
self.connect((self.gr_moving_average_xx_0, 0), (self.digital_clock_recovery_mm_xx_0, 0))
self.connect((self.pll, 0), (self.gr_moving_average_xx_0, 0))
self.connect((self.uhd_usrp_source_0, 0), (self.agc, 0))
self.connect((self.agc, 0), (self.pll, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.deframer, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.deframer, 0), (self.decoder, 0))
self.connect((self.deframer, 0), (self.frame_sink, 0))
def get_sym_rate(self):
return self.sym_rate
def set_sym_rate(self, sym_rate):
self.sym_rate = sym_rate
self.set_sps(self.sample_rate/self.sym_rate)
def get_sample_rate(self):
return self.sample_rate
def set_sample_rate(self, sample_rate):
self.sample_rate = sample_rate
self.set_max_carrier_offset(2*math.pi*100e3/self.sample_rate)
self.set_sps(self.sample_rate/self.sym_rate)
self.uhd_usrp_source_0.set_samp_rate(self.sample_rate)
def get_sps(self):
return self.sps
def set_sps(self, sps):
self.sps = sps
self.set_hs(int(self.sps/2.0))
self.digital_clock_recovery_mm_xx_0.set_omega(self.sps/2.0)
def get_config_filename(self):
return self.config_filename
def set_config_filename(self, config_filename):
self.config_filename = config_filename
self._clock_alpha_config = ConfigParser.ConfigParser()
self._clock_alpha_config.read(self.config_filename)
if not self._clock_alpha_config.has_section('usrp_rx_hrpt'):
self._clock_alpha_config.add_section('usrp_rx_hrpt')
self._clock_alpha_config.set('usrp_rx_hrpt', 'clock_alpha', str(self.clock_alpha))
self._clock_alpha_config.write(open(self.config_filename, 'w'))
self._pll_alpha_config = ConfigParser.ConfigParser()
self._pll_alpha_config.read(self.config_filename)
if not self._pll_alpha_config.has_section('usrp_rx_hrpt'):
self._pll_alpha_config.add_section('usrp_rx_hrpt')
self._pll_alpha_config.set('usrp_rx_hrpt', 'pll_alpha', str(self.pll_alpha))
self._pll_alpha_config.write(open(self.config_filename, 'w'))
self._gain_config = ConfigParser.ConfigParser()
self._gain_config.read(self.config_filename)
if not self._gain_config.has_section('usrp_rx_hrpt'):
self._gain_config.add_section('usrp_rx_hrpt')
self._gain_config.set('usrp_rx_hrpt', 'gain', str(self.gain))
self._gain_config.write(open(self.config_filename, 'w'))
self._freq_config = ConfigParser.ConfigParser()
self._freq_config.read(self.config_filename)
if not self._freq_config.has_section('usrp_rx_hrpt'):
self._freq_config.add_section('usrp_rx_hrpt')
self._freq_config.set('usrp_rx_hrpt', 'freq', str(self.freq))
self._freq_config.write(open(self.config_filename, 'w'))
self._output_filename_config = ConfigParser.ConfigParser()
self._output_filename_config.read(self.config_filename)
if not self._output_filename_config.has_section('usrp_rx_hrpt'):
self._output_filename_config.add_section('usrp_rx_hrpt')
self._output_filename_config.set('usrp_rx_hrpt', 'filename', str(self.output_filename))
self._output_filename_config.write(open(self.config_filename, 'w'))
self._addr_config = ConfigParser.ConfigParser()
self._addr_config.read(self.config_filename)
if not self._addr_config.has_section('usrp_rx_hrpt'):
self._addr_config.add_section('usrp_rx_hrpt')
self._addr_config.set('usrp_rx_hrpt', 'addr', str(self.addr))
self._addr_config.write(open(self.config_filename, 'w'))
def get_pll_alpha(self):
return self.pll_alpha
def set_pll_alpha(self, pll_alpha):
self.pll_alpha = pll_alpha
self._pll_alpha_config = ConfigParser.ConfigParser()
self._pll_alpha_config.read(self.config_filename)
if not self._pll_alpha_config.has_section('usrp_rx_hrpt'):
self._pll_alpha_config.add_section('usrp_rx_hrpt')
self._pll_alpha_config.set('usrp_rx_hrpt', 'pll_alpha', str(self.pll_alpha))
self._pll_alpha_config.write(open(self.config_filename, 'w'))
self.pll.set_alpha(self.pll_alpha)
self.pll.set_beta(self.pll_alpha**2/4.0)
def get_output_filename(self):
return self.output_filename
def set_output_filename(self, output_filename):
self.output_filename = output_filename
self._output_filename_config = ConfigParser.ConfigParser()
self._output_filename_config.read(self.config_filename)
if not self._output_filename_config.has_section('usrp_rx_hrpt'):
self._output_filename_config.add_section('usrp_rx_hrpt')
self._output_filename_config.set('usrp_rx_hrpt', 'filename', str(self.output_filename))
self._output_filename_config.write(open(self.config_filename, 'w'))
def get_max_clock_offset(self):
return self.max_clock_offset
def set_max_clock_offset(self, max_clock_offset):
self.max_clock_offset = max_clock_offset
def get_max_carrier_offset(self):
return self.max_carrier_offset
def set_max_carrier_offset(self, max_carrier_offset):
self.max_carrier_offset = max_carrier_offset
self.pll.set_max_offset(self.max_carrier_offset)
def get_hs(self):
return self.hs
def set_hs(self, hs):
self.hs = hs
self.gr_moving_average_xx_0.set_length_and_scale(self.hs, 1.0/self.hs)
def get_gain(self):
return self.gain
def set_gain(self, gain):
self.gain = gain
self._gain_config = ConfigParser.ConfigParser()
self._gain_config.read(self.config_filename)
if not self._gain_config.has_section('usrp_rx_hrpt'):
self._gain_config.add_section('usrp_rx_hrpt')
self._gain_config.set('usrp_rx_hrpt', 'gain', str(self.gain))
self._gain_config.write(open(self.config_filename, 'w'))
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self._freq_config = ConfigParser.ConfigParser()
self._freq_config.read(self.config_filename)
if not self._freq_config.has_section('usrp_rx_hrpt'):
self._freq_config.add_section('usrp_rx_hrpt')
self._freq_config.set('usrp_rx_hrpt', 'freq', str(self.freq))
self._freq_config.write(open(self.config_filename, 'w'))
self.uhd_usrp_source_0.set_center_freq(self.freq, 0)
def get_clock_alpha(self):
return self.clock_alpha
def set_clock_alpha(self, clock_alpha):
self.clock_alpha = clock_alpha
self._clock_alpha_config = ConfigParser.ConfigParser()
self._clock_alpha_config.read(self.config_filename)
if not self._clock_alpha_config.has_section('usrp_rx_hrpt'):
self._clock_alpha_config.add_section('usrp_rx_hrpt')
self._clock_alpha_config.set('usrp_rx_hrpt', 'clock_alpha', str(self.clock_alpha))
self._clock_alpha_config.write(open(self.config_filename, 'w'))
self.digital_clock_recovery_mm_xx_0.set_gain_omega(self.clock_alpha**2/4.0)
self.digital_clock_recovery_mm_xx_0.set_gain_mu(self.clock_alpha)
def get_addr(self):
return self.addr
def set_addr(self, addr):
self.addr = addr
self._addr_config = ConfigParser.ConfigParser()
self._addr_config.read(self.config_filename)
if not self._addr_config.has_section('usrp_rx_hrpt'):
self._addr_config.add_section('usrp_rx_hrpt')
self._addr_config.set('usrp_rx_hrpt', 'addr', str(self.addr))
self._addr_config.write(open(self.config_filename, 'w'))
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option, usage="%prog: [options]")
(options, args) = parser.parse_args()
tb = usrp_rx_hrpt_nogui()
tb.start()
raw_input('Press Enter to quit: ')
tb.stop()
|
OlafLee/theano_exercises | refs/heads/master | 02_advanced/01_symbolic/02_traverse_soln.py | 14 | import numpy as np
from theano.gof import Variable
from theano import tensor as T
def arg_to_softmax(prob):
"""
Oh no! Someone has passed you the probability output,
"prob", of a softmax function, and you want the unnormalized
log probability--the argument to the softmax.
Verify that prob really is the output of a softmax. Raise a
TypeError if it is not.
If it is, return the argument to the softmax.
"""
if not isinstance(prob, Variable):
raise TypeError()
if prob.owner is None:
raise TypeError()
owner = prob.owner
if not isinstance(owner.op, T.nnet.Softmax):
raise TypeError()
rval, = owner.inputs
return rval
if __name__ == "__main__":
x = np.ones((5, 4))
try:
arg_to_softmax(x)
raise Exception("You should have raised an error.")
except TypeError:
pass
x = T.matrix()
try:
arg_to_softmax(x)
raise Exception("You should have raised an error.")
except TypeError:
pass
y = T.nnet.sigmoid(x)
try:
arg_to_softmax(y)
raise Exception("You should have raised an error.")
except TypeError:
pass
y = T.nnet.softmax(x)
rval = arg_to_softmax(y)
assert rval is x
print "SUCCESS!"
|
pydoit/doit | refs/heads/master | tests/test_action.py | 2 | import os
import sys
import tempfile
import textwrap
import locale
locale # quiet pyflakes
from pathlib import PurePath, Path
from io import StringIO, BytesIO
from threading import Thread
import time
from sys import executable
from unittest.mock import Mock
import pytest
from doit import action
from doit.task import Task
from doit.exceptions import TaskError, TaskFailed
#path to test folder
TEST_PATH = os.path.dirname(__file__)
PROGRAM = "%s %s/sample_process.py" % (executable, TEST_PATH)
@pytest.fixture
def tmpfile(request):
temp = tempfile.TemporaryFile('w+', encoding="utf-8")
request.addfinalizer(temp.close)
return temp
############# CmdAction
class TestCmdAction(object):
# if nothing is raised it is successful
def test_success(self):
my_action = action.CmdAction(PROGRAM)
got = my_action.execute()
assert got is None
def test_success_noshell(self):
my_action = action.CmdAction(PROGRAM.split(), shell=False)
got = my_action.execute()
assert got is None
def test_error(self):
my_action = action.CmdAction("%s 1 2 3" % PROGRAM)
got = my_action.execute()
assert isinstance(got, TaskError)
def test_env(self):
env = os.environ.copy()
env['GELKIPWDUZLOVSXE'] = '1'
my_action = action.CmdAction("%s check env" % PROGRAM, env=env)
got = my_action.execute()
assert got is None
def test_failure(self):
my_action = action.CmdAction("%s please fail" % PROGRAM)
got = my_action.execute()
assert isinstance(got, TaskFailed)
def test_str(self):
my_action = action.CmdAction(PROGRAM)
assert "Cmd: %s" % PROGRAM == str(my_action)
def test_unicode(self):
action_str = PROGRAM + "中文"
my_action = action.CmdAction(action_str)
assert "Cmd: %s" % action_str == str(my_action)
def test_repr(self):
my_action = action.CmdAction(PROGRAM)
expected = "<CmdAction: '%s'>" % PROGRAM
assert expected == repr(my_action), repr(my_action)
def test_result(self):
my_action = action.CmdAction("%s 1 2" % PROGRAM)
my_action.execute()
assert "12" == my_action.result
def test_values(self):
# for cmdActions they are empty if save_out not specified
my_action = action.CmdAction("%s 1 2" % PROGRAM)
my_action.execute()
assert {} == my_action.values
class TestCmdActionParams(object):
def test_invalid_param_stdout(self):
pytest.raises(action.InvalidTask, action.CmdAction,
[PROGRAM], stdout=None)
def test_changePath(self, tmpdir):
path = tmpdir.mkdir("foo")
command = '%s -c "import os; print(os.getcwd())"' % executable
my_action = action.CmdAction(command, cwd=path.strpath)
my_action.execute()
assert path + os.linesep == my_action.out, repr(my_action.out)
def test_noPathSet(self, tmpdir):
path = tmpdir.mkdir("foo")
command = '%s -c "import os; print(os.getcwd())"' % executable
my_action = action.CmdAction(command)
my_action.execute()
assert path.strpath + os.linesep != my_action.out, repr(my_action.out)
class TestCmdVerbosity(object):
# Capture stderr
def test_captureStderr(self):
cmd = "%s please fail" % PROGRAM
my_action = action.CmdAction(cmd)
got = my_action.execute()
assert isinstance(got, TaskFailed)
assert "err output on failure" == my_action.err, repr(my_action.err)
# Capture stdout
def test_captureStdout(self):
my_action = action.CmdAction("%s hi_stdout hi2" % PROGRAM)
my_action.execute()
assert "hi_stdout" == my_action.out, repr(my_action.out)
# Do not capture stderr
    # Test using a tempfile. It is not possible (at least we don't know
    # how) to check whether the output went to the parent process;
    # faking sys.stderr with a StringIO doesn't work.
def test_noCaptureStderr(self, tmpfile):
my_action = action.CmdAction("%s please fail" % PROGRAM)
action_result = my_action.execute(err=tmpfile)
assert isinstance(action_result, TaskFailed)
tmpfile.seek(0)
got = tmpfile.read()
assert "err output on failure" == got, repr(got)
assert "err output on failure" == my_action.err, repr(my_action.err)
# Do not capture stdout
def test_noCaptureStdout(self, tmpfile):
my_action = action.CmdAction("%s hi_stdout hi2" % PROGRAM)
my_action.execute(out=tmpfile)
tmpfile.seek(0)
got = tmpfile.read()
assert "hi_stdout" == got, repr(got)
assert "hi_stdout" == my_action.out, repr(my_action.out)
class TestCmdExpandAction(object):
def test_task_meta_reference(self):
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
cmd += " %(dependencies)s - %(changed)s - %(targets)s"
dependencies = ["data/dependency1", "data/dependency2"]
targets = ["data/target", "data/targetXXX"]
task = Task('Fake', [cmd], dependencies, targets)
task.dep_changed = ["data/dependency1"]
task.options = {}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.split('-')
assert task.file_dep == set(got[0].split())
assert task.dep_changed == got[1].split()
assert targets == got[2].split()
def test_task_options(self):
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
cmd += " %(opt1)s - %(opt2)s"
task = Task('Fake', [cmd])
task.options = {'opt1':'3', 'opt2':'abc def'}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "3 - abc def" == got
def test_task_pos_arg(self):
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
cmd += " %(pos)s"
task = Task('Fake', [cmd], pos_arg='pos')
task.options = {}
task.pos_arg_val = ['hi', 'there']
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "hi there" == got
def test_task_pos_arg_None(self):
# pos_arg_val is None when the task is not specified from
# command line but executed because it is a task_dep
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
cmd += " %(pos)s"
task = Task('Fake', [cmd], pos_arg='pos')
task.options = {}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "" == got
def test_callable_return_command_str(self):
def get_cmd(opt1, opt2):
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
return cmd + " %s - %s" % (opt1, opt2)
task = Task('Fake', [action.CmdAction(get_cmd)])
task.options = {'opt1':'3', 'opt2':'abc def'}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "3 - abc def" == got, repr(got)
def test_callable_tuple_return_command_str(self):
def get_cmd(opt1, opt2):
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
return cmd + " %s - %s" % (opt1, opt2)
task = Task('Fake',
[action.CmdAction((get_cmd, [], {'opt2':'abc def'}))])
task.options = {'opt1':'3'}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "3 - abc def" == got, repr(got)
def test_callable_invalid(self):
def get_cmd(blabla): pass
task = Task('Fake', [action.CmdAction(get_cmd)])
task.options = {'opt1':'3'}
my_action = task.actions[0]
got = my_action.execute()
assert isinstance(got, TaskError)
def test_string_list_cant_be_expanded(self):
cmd = [executable, "%s/myecho.py" % TEST_PATH]
task = Task('Fake', [cmd])
my_action = task.actions[0]
assert cmd == my_action.expand_action()
def test_list_can_contain_path(self):
cmd = [executable, PurePath(TEST_PATH), Path("myecho.py")]
task = Task('Fake', [cmd])
my_action = task.actions[0]
assert [executable, TEST_PATH, "myecho.py"] == my_action.expand_action()
def test_list_should_contain_strings_or_paths(self):
cmd = [executable, PurePath(TEST_PATH), 42, Path("myecho.py")]
task = Task('Fake', [cmd])
my_action = task.actions[0]
assert pytest.raises(action.InvalidTask, my_action.expand_action)
class TestCmdActionStringFormatting(object):
def test_old(self, monkeypatch):
monkeypatch.setattr(action.CmdAction, 'STRING_FORMAT', 'old')
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
cmd += " %(dependencies)s - %(opt1)s"
task = Task('Fake', [cmd], ['data/dependency1'])
task.options = {'opt1':'abc'}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "data/dependency1 - abc" == got
def test_new(self, monkeypatch):
monkeypatch.setattr(action.CmdAction, 'STRING_FORMAT', 'new')
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
cmd += " {dependencies} - {opt1}"
task = Task('Fake', [cmd], ['data/dependency1'])
task.options = {'opt1':'abc'}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "data/dependency1 - abc" == got
def test_both(self, monkeypatch):
monkeypatch.setattr(action.CmdAction, 'STRING_FORMAT', 'both')
cmd = "%s %s/myecho.py" % (executable, TEST_PATH)
cmd += " {dependencies} - %(opt1)s"
task = Task('Fake', [cmd], ['data/dependency1'])
task.options = {'opt1':'abc'}
my_action = task.actions[0]
assert my_action.execute() is None
got = my_action.out.strip()
assert "data/dependency1 - abc" == got
class TestCmd_print_process_output_line(object):
def test_non_unicode_string_error_strict(self):
my_action = action.CmdAction("", decode_error='strict')
not_unicode = BytesIO('\xa9'.encode("latin-1"))
realtime = Mock()
realtime.encoding = 'utf-8'
pytest.raises(UnicodeDecodeError,
my_action._print_process_output,
Mock(), not_unicode, Mock(), realtime)
def test_non_unicode_string_error_replace(self):
my_action = action.CmdAction("") # default is decode_error = 'replace'
not_unicode = BytesIO('\xa9'.encode("latin-1"))
realtime = Mock()
realtime.encoding = 'utf-8'
capture = StringIO()
my_action._print_process_output(
Mock(), not_unicode, capture, realtime)
# get the replacement char
expected = '�'
assert expected == capture.getvalue()
def test_non_unicode_string_ok(self):
my_action = action.CmdAction("", encoding='iso-8859-1')
not_unicode = BytesIO('\xa9'.encode("latin-1"))
realtime = Mock()
realtime.encoding = 'utf-8'
capture = StringIO()
my_action._print_process_output(
Mock(), not_unicode, capture, realtime)
# get the correct char from latin-1 encoding
expected = '©'
assert expected == capture.getvalue()
    # don't test unicode if the system locale doesn't support unicode
# see https://bitbucket.org/schettino72/doit/pull-request/11
@pytest.mark.skipif('locale.getlocale()[1] is None')
def test_unicode_string(self, tmpfile):
my_action = action.CmdAction("")
unicode_in = tempfile.TemporaryFile('w+b')
unicode_in.write(" 中文".encode('utf-8'))
unicode_in.seek(0)
my_action._print_process_output(
Mock(), unicode_in, Mock(), tmpfile)
@pytest.mark.skipif('locale.getlocale()[1] is None')
def test_unicode_string2(self, tmpfile):
        # this \uXXXX character behaves differently!
my_action = action.CmdAction("")
unicode_in = tempfile.TemporaryFile('w+b')
unicode_in.write(" 中文 \u2018".encode('utf-8'))
unicode_in.seek(0)
my_action._print_process_output(
Mock(), unicode_in, Mock(), tmpfile)
def test_line_buffered_output(self):
my_action = action.CmdAction("")
out, inp = os.pipe()
out, inp = os.fdopen(out, 'rb'), os.fdopen(inp, 'wb')
inp.write('abcd\nline2'.encode('utf-8'))
inp.flush()
capture = StringIO()
thread = Thread(target=my_action._print_process_output,
args=(Mock(), out, capture, None))
thread.start()
time.sleep(0.1)
try:
got = capture.getvalue()
# 'line2' is not captured because of line buffering
assert 'abcd\n' == got
print('asserted')
finally:
inp.close()
def test_unbuffered_output(self):
my_action = action.CmdAction("", buffering=1)
out, inp = os.pipe()
out, inp = os.fdopen(out, 'rb'), os.fdopen(inp, 'wb')
inp.write('abcd\nline2'.encode('utf-8'))
inp.flush()
capture = StringIO()
thread = Thread(target=my_action._print_process_output,
args=(Mock(), out, capture, None))
thread.start()
time.sleep(0.1)
try:
got = capture.getvalue()
assert 'abcd\nline2' == got
finally:
inp.close()
def test_unbuffered_env(self, monkeypatch):
my_action = action.CmdAction("", buffering=1)
proc_mock = Mock()
proc_mock.configure_mock(returncode=0)
popen_mock = Mock(return_value=proc_mock)
from doit.action import subprocess
monkeypatch.setattr(subprocess, 'Popen', popen_mock)
my_action._print_process_output = Mock()
my_action.execute()
env = popen_mock.call_args[-1]['env']
assert env and env.get('PYTHONUNBUFFERED', False) == '1'
class TestCmdSaveOutput(object):
def test_success(self):
TEST_PATH = os.path.dirname(__file__)
PROGRAM = "%s %s/sample_process.py" % (executable, TEST_PATH)
my_action = action.CmdAction(PROGRAM + " x1 x2", save_out='out')
my_action.execute()
assert {'out': 'x1'} == my_action.values
class TestWriter(object):
def test_write(self):
w1 = StringIO()
w2 = StringIO()
writer = action.Writer(w1, w2)
writer.flush() # make sure flush is supported
writer.write("hello")
assert "hello" == w1.getvalue()
assert "hello" == w2.getvalue()
def test_isatty_true(self):
w1 = StringIO()
w1.isatty = lambda: True
w2 = StringIO()
writer = action.Writer(w1, w2)
assert not writer.isatty()
def test_isatty_false(self):
w1 = StringIO()
w1.isatty = lambda: True
w2 = StringIO()
w2.isatty = lambda: True
writer = action.Writer(w1, w2)
assert writer.isatty()
def test_isatty_overwrite_yes(self):
w1 = StringIO()
w1.isatty = lambda: True
w2 = StringIO()
writer = action.Writer(w1)
writer.add_writer(w2, True)
def test_isatty_overwrite_no(self):
w1 = StringIO()
w1.isatty = lambda: True
w2 = StringIO()
w2.isatty = lambda: True
writer = action.Writer(w1)
writer.add_writer(w2, False)
############# PythonAction
class TestPythonAction(object):
def test_success_bool(self):
def success_sample():return True
my_action = action.PythonAction(success_sample)
# nothing raised it was successful
my_action.execute()
def test_success_None(self):
def success_sample():return
my_action = action.PythonAction(success_sample)
# nothing raised it was successful
my_action.execute()
def test_success_str(self):
def success_sample():return ""
my_action = action.PythonAction(success_sample)
# nothing raised it was successful
my_action.execute()
def test_success_dict(self):
def success_sample():return {}
my_action = action.PythonAction(success_sample)
# nothing raised it was successful
my_action.execute()
def test_error_object(self):
        # anything but None, bool, string or dict
def error_sample(): return object()
my_action = action.PythonAction(error_sample)
got = my_action.execute()
assert isinstance(got, TaskError)
def test_error_taskfail(self):
# should get the same exception as was returned from the
# user's function
def error_sample(): return TaskFailed("too bad")
ye_olde_action = action.PythonAction(error_sample)
ret = ye_olde_action.execute()
assert isinstance(ret, TaskFailed)
assert str(ret).endswith("too bad\n")
def test_error_taskerror(self):
def error_sample(): return TaskError("so sad")
ye_olde_action = action.PythonAction(error_sample)
ret = ye_olde_action.execute()
assert str(ret).endswith("so sad\n")
def test_error_exception(self):
def error_sample(): raise Exception("asdf")
my_action = action.PythonAction(error_sample)
got = my_action.execute()
assert isinstance(got, TaskError)
def test_fail_bool(self):
def fail_sample():return False
my_action = action.PythonAction(fail_sample)
got = my_action.execute()
assert isinstance(got, TaskFailed)
# any callable should work, not only functions
def test_callable_obj(self):
class CallMe:
def __call__(self):
return False
my_action = action.PythonAction(CallMe())
got = my_action.execute()
assert isinstance(got, TaskFailed)
# helper to test callable with parameters
    def _func_par(self, par1, par2, par3=5):
        return par1 == par2 and par3 > 10
def test_init(self):
# default values
action1 = action.PythonAction(self._func_par)
assert action1.args == []
assert action1.kwargs == {}
# not a callable
pytest.raises(action.InvalidTask, action.PythonAction, "abc")
# args not a list
pytest.raises(action.InvalidTask, action.PythonAction, self._func_par, "c")
        # kwargs not a dict
pytest.raises(action.InvalidTask, action.PythonAction,
self._func_par, None, "a")
    # can't use a class as a callable
def test_init_callable_class(self):
class CallMe(object):
pass
pytest.raises(action.InvalidTask, action.PythonAction, CallMe)
    # can't use built-ins
def test_init_callable_builtin(self):
pytest.raises(action.InvalidTask, action.PythonAction, any)
def test_functionParametersArgs(self):
my_action = action.PythonAction(self._func_par,args=(2,2,25))
my_action.execute()
def test_functionParametersKwargs(self):
my_action = action.PythonAction(self._func_par,
kwargs={'par1':2,'par2':2,'par3':25})
my_action.execute()
def test_functionParameters(self):
my_action = action.PythonAction(self._func_par,args=(2,2),
kwargs={'par3':25})
my_action.execute()
def test_functionParametersFail(self):
my_action = action.PythonAction(self._func_par, args=(2,3),
kwargs={'par3':25})
got = my_action.execute()
assert isinstance(got, TaskFailed)
def test_str(self):
def str_sample(): return True
my_action = action.PythonAction(str_sample)
assert "Python: function" in str(my_action)
assert "str_sample" in str(my_action)
def test_repr(self):
def repr_sample(): return True
my_action = action.PythonAction(repr_sample)
assert "<PythonAction: '%s'>" % repr(repr_sample) == repr(my_action)
def test_result(self):
def vvv(): return "my value"
my_action = action.PythonAction(vvv)
my_action.execute()
assert "my value" == my_action.result
def test_result_dict(self):
def vvv(): return {'xxx': "my value"}
my_action = action.PythonAction(vvv)
my_action.execute()
assert {'xxx': "my value"} == my_action.result
def test_values(self):
def vvv(): return {'x': 5, 'y':10}
my_action = action.PythonAction(vvv)
my_action.execute()
assert {'x': 5, 'y':10} == my_action.values
class TestPythonVerbosity(object):
def write_stderr(self):
sys.stderr.write("this is stderr S\n")
def write_stdout(self):
sys.stdout.write("this is stdout S\n")
def test_captureStderr(self):
my_action = action.PythonAction(self.write_stderr)
my_action.execute()
assert "this is stderr S\n" == my_action.err, repr(my_action.err)
def test_captureStdout(self):
my_action = action.PythonAction(self.write_stdout)
my_action.execute()
assert "this is stdout S\n" == my_action.out, repr(my_action.out)
def test_noCaptureStderr(self, capsys):
my_action = action.PythonAction(self.write_stderr)
my_action.execute(err=sys.stderr)
got = capsys.readouterr()[1]
assert "this is stderr S\n" == got, repr(got)
def test_noCaptureStdout(self, capsys):
my_action = action.PythonAction(self.write_stdout)
my_action.execute(out=sys.stdout)
got = capsys.readouterr()[0]
assert "this is stdout S\n" == got, repr(got)
def test_redirectStderr(self):
tmpfile = tempfile.TemporaryFile('w+')
my_action = action.PythonAction(self.write_stderr)
my_action.execute(err=tmpfile)
tmpfile.seek(0)
got = tmpfile.read()
tmpfile.close()
assert "this is stderr S\n" == got, got
def test_redirectStdout(self):
tmpfile = tempfile.TemporaryFile('w+')
my_action = action.PythonAction(self.write_stdout)
my_action.execute(out=tmpfile)
tmpfile.seek(0)
got = tmpfile.read()
tmpfile.close()
assert "this is stdout S\n" == got, got
class TestPythonActionPrepareKwargsMeta(object):
def test_no_extra_args(self):
# no error trying to inject values
def py_callable():
return True
task = Task('Fake', [py_callable], file_dep=['dependencies'])
task.options = {}
my_action = task.actions[0]
my_action.execute()
def test_keyword_extra_args(self):
got = []
def py_callable(arg=None, **kwargs):
got.append(kwargs)
my_task = Task('Fake', [(py_callable, (), {'b': 4})],
file_dep=['dependencies'])
my_task.options = {'foo': 'bar'}
my_action = my_task.actions[0]
my_action.execute()
# meta args do not leak into kwargs
assert got == [{'foo': 'bar', 'b': 4}]
def test_named_extra_args(self):
got = []
def py_callable(targets, dependencies, changed, task):
got.append(targets)
got.append(dependencies)
got.append(changed)
got.append(task)
task = Task('Fake', [py_callable], file_dep=['dependencies'],
targets=['targets'])
task.dep_changed = ['changed']
task.options = {}
my_action = task.actions[0]
my_action.execute()
assert got == [['targets'], ['dependencies'], ['changed'],
task]
def test_mixed_args(self):
got = []
def py_callable(a, b, changed):
got.append(a)
got.append(b)
got.append(changed)
task = Task('Fake', [(py_callable, ('a', 'b'))])
task.options = {}
task.dep_changed = ['changed']
my_action = task.actions[0]
my_action.execute()
assert got == ['a', 'b', ['changed']]
def test_extra_arg_overwritten(self):
got = []
def py_callable(a, b, changed):
got.append(a)
got.append(b)
got.append(changed)
task = Task('Fake', [(py_callable, ('a', 'b', 'c'))])
task.dep_changed = ['changed']
task.options = {}
my_action = task.actions[0]
my_action.execute()
assert got == ['a', 'b', 'c']
def test_extra_kwarg_overwritten(self):
got = []
def py_callable(a, b, **kwargs):
got.append(a)
got.append(b)
got.append(kwargs['changed'])
task = Task('Fake', [(py_callable, ('a', 'b'), {'changed': 'c'})])
task.options = {}
task.dep_changed = ['changed']
my_action = task.actions[0]
my_action.execute()
assert got == ['a', 'b', 'c']
def test_meta_arg_default_disallowed(self):
def py_callable(a, b, changed=None): pass
task = Task('Fake', [(py_callable, ('a', 'b'))])
task.options = {}
task.dep_changed = ['changed']
my_action = task.actions[0]
pytest.raises(action.InvalidTask, my_action.execute)
def test_callable_obj(self):
got = []
class CallMe(object):
def __call__(self, a, b, changed):
got.append(a)
got.append(b)
got.append(changed)
task = Task('Fake', [(CallMe(), ('a', 'b'))])
task.options = {}
task.dep_changed = ['changed']
my_action = task.actions[0]
my_action.execute()
assert got == ['a', 'b', ['changed']]
def test_method(self):
got = []
class CallMe(object):
def xxx(self, a, b, changed):
got.append(a)
got.append(b)
got.append(changed)
task = Task('Fake', [(CallMe().xxx, ('a', 'b'))])
task.options = {}
task.dep_changed = ['changed']
my_action = task.actions[0]
my_action.execute()
assert got == ['a', 'b', ['changed']]
def test_task_options(self):
got = []
def py_callable(opt1, opt3):
got.append(opt1)
got.append(opt3)
task = Task('Fake', [py_callable])
task.options = {'opt1':'1', 'opt2':'abc def', 'opt3':3}
my_action = task.actions[0]
my_action.execute()
assert ['1',3] == got, repr(got)
def test_task_pos_arg(self):
got = []
def py_callable(pos):
got.append(pos)
task = Task('Fake', [py_callable], pos_arg='pos')
task.options = {}
task.pos_arg_val = ['hi', 'there']
my_action = task.actions[0]
my_action.execute()
assert [['hi', 'there']] == got, repr(got)
def test_option_default_allowed(self):
got = []
def py_callable(opt2='ABC'):
got.append(opt2)
task = Task('Fake', [py_callable])
task.options = {'opt2':'123'}
my_action = task.actions[0]
my_action.execute()
assert ['123'] == got, repr(got)
def test_kwonlyargs_minimal(self):
got = []
scope = {'got': got}
exec(textwrap.dedent('''
def py_callable(*args, kwonly=None):
got.append(args)
got.append(kwonly)
'''), scope)
task = Task('Fake', [(scope['py_callable'], (1, 2, 3), {'kwonly': 4})])
task.options = {}
my_action = task.actions[0]
my_action.execute()
assert [(1, 2, 3), 4] == got, repr(got)
def test_kwonlyargs_full(self):
got = []
scope = {'got': got}
exec(textwrap.dedent('''
def py_callable(pos, *args, kwonly=None, **kwargs):
got.append(pos)
got.append(args)
got.append(kwonly)
got.append(kwargs['foo'])
'''), scope)
task = Task('Fake', [
(scope['py_callable'], [1,2,3], {'kwonly': 4, 'foo': 5})])
task.options = {}
my_action = task.actions[0]
my_action.execute()
assert [1, (2, 3), 4, 5] == got, repr(got)
def test_action_modifies_task_but_not_attrs(self):
def py_callable(targets, dependencies, changed, task):
targets.append('new_target')
dependencies.append('new_dependency')
changed.append('new_changed')
task.file_dep.add('dep2')
my_task = Task('Fake', [py_callable], file_dep=['dependencies'],
targets=['targets'])
my_task.dep_changed = ['changed']
my_task.options = {}
my_action = my_task.actions[0]
my_action.execute()
assert my_task.file_dep == set(['dependencies', 'dep2'])
assert my_task.targets == ['targets']
assert my_task.dep_changed == ['changed']
##############
class TestCreateAction(object):
class TaskStub(object):
name = 'stub'
mytask = TaskStub()
def testBaseAction(self):
class Sample(action.BaseAction): pass
my_action = action.create_action(Sample(), self.mytask, 'actions')
assert isinstance(my_action, Sample)
assert self.mytask == my_action.task
def testStringAction(self):
my_action = action.create_action("xpto 14 7", self.mytask, 'actions')
assert isinstance(my_action, action.CmdAction)
assert my_action.shell == True
def testListStringAction(self):
my_action = action.create_action(["xpto", 14, 7], self.mytask, 'actions')
assert isinstance(my_action, action.CmdAction)
assert my_action.shell == False
def testMethodAction(self):
def dumb(): return
my_action = action.create_action(dumb, self.mytask, 'actions')
assert isinstance(my_action, action.PythonAction)
def testTupleAction(self):
def dumb(): return
my_action = action.create_action((dumb,[1,2],{'a':5}), self.mytask,
'actions')
assert isinstance(my_action, action.PythonAction)
def testTupleActionMoreThanThreeElements(self):
def dumb(): return
expected = "Task 'stub': invalid 'actions' tuple length"
with pytest.raises(action.InvalidTask, match=expected):
action.create_action((dumb,[1,2],{'a':5},'oo'), self.mytask,
'actions')
def testInvalidActionNone(self):
expected = "Task 'stub': invalid 'actions' type. got: None"
with pytest.raises(action.InvalidTask, match=expected):
action.create_action(None, self.mytask, 'actions')
def testInvalidActionObject(self):
expected = "Task 'stub': invalid 'actions' type. got: <"
with pytest.raises(action.InvalidTask, match=expected):
action.create_action(self, self.mytask, 'actions')
def test_invalid_action_task_param_name(self):
expected = "Task 'stub': invalid 'clean' type. got: True"
with pytest.raises(action.InvalidTask, match=expected):
action.create_action(True, self.mytask, 'clean')
|
rimbalinux/LMD3 | refs/heads/master | django/utils/daemonize.py | 13 | import os
import sys
if os.name == 'posix':
def become_daemon(our_home_dir='.', out_log='/dev/null',
err_log='/dev/null', umask=022):
"Robustly turn into a UNIX daemon, running in our_home_dir."
# First fork
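        # (the child is no longer a process group leader, so setsid() can succeed)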
try:
if os.fork() > 0:
sys.exit(0) # kill off parent
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid()
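        # Now a session leader, detached from the parent's controlling terminal.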
os.chdir(our_home_dir)
os.umask(umask)
# Second fork
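        # (a session leader could reacquire a controlling terminal; forking
        # again guarantees the daemon never can)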
try:
if os.fork() > 0:
os._exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
os._exit(1)
si = open('/dev/null', 'r')
so = open(out_log, 'a+', 0)
se = open(err_log, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Set custom file descriptors so that they get proper buffering.
sys.stdout, sys.stderr = so, se
else:
def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=022):
"""
If we're not running under a POSIX system, just simulate the daemon
mode by doing redirections and directory changing.
"""
os.chdir(our_home_dir)
os.umask(umask)
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()
if err_log:
sys.stderr = open(err_log, 'a', 0)
else:
sys.stderr = NullDevice()
if out_log:
sys.stdout = open(out_log, 'a', 0)
else:
sys.stdout = NullDevice()
class NullDevice:
"A writeable object that writes to nowhere -- like /dev/null."
def write(self, s):
pass
|
Alwnikrotikz/pyglet | refs/heads/master | pyglet/libs/x11/xf86vmode.py | 46 | '''Wrapper for Xxf86vm
Generated with:
tools/genwrappers.py xf86vmode
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('Xxf86vm')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
import pyglet.libs.x11.xlib
X_XF86VidModeQueryVersion = 0 # /usr/include/X11/extensions/xf86vmode.h:4885
X_XF86VidModeGetModeLine = 1 # /usr/include/X11/extensions/xf86vmode.h:4886
X_XF86VidModeModModeLine = 2 # /usr/include/X11/extensions/xf86vmode.h:4887
X_XF86VidModeSwitchMode = 3 # /usr/include/X11/extensions/xf86vmode.h:4888
X_XF86VidModeGetMonitor = 4 # /usr/include/X11/extensions/xf86vmode.h:4889
X_XF86VidModeLockModeSwitch = 5 # /usr/include/X11/extensions/xf86vmode.h:4890
X_XF86VidModeGetAllModeLines = 6 # /usr/include/X11/extensions/xf86vmode.h:4891
X_XF86VidModeAddModeLine = 7 # /usr/include/X11/extensions/xf86vmode.h:4892
X_XF86VidModeDeleteModeLine = 8 # /usr/include/X11/extensions/xf86vmode.h:4893
X_XF86VidModeValidateModeLine = 9 # /usr/include/X11/extensions/xf86vmode.h:4894
X_XF86VidModeSwitchToMode = 10 # /usr/include/X11/extensions/xf86vmode.h:4895
X_XF86VidModeGetViewPort = 11 # /usr/include/X11/extensions/xf86vmode.h:4896
X_XF86VidModeSetViewPort = 12 # /usr/include/X11/extensions/xf86vmode.h:4897
X_XF86VidModeGetDotClocks = 13 # /usr/include/X11/extensions/xf86vmode.h:4899
X_XF86VidModeSetClientVersion = 14 # /usr/include/X11/extensions/xf86vmode.h:4900
X_XF86VidModeSetGamma = 15 # /usr/include/X11/extensions/xf86vmode.h:4901
X_XF86VidModeGetGamma = 16 # /usr/include/X11/extensions/xf86vmode.h:4902
X_XF86VidModeGetGammaRamp = 17 # /usr/include/X11/extensions/xf86vmode.h:4903
X_XF86VidModeSetGammaRamp = 18 # /usr/include/X11/extensions/xf86vmode.h:4904
X_XF86VidModeGetGammaRampSize = 19 # /usr/include/X11/extensions/xf86vmode.h:4905
X_XF86VidModeGetPermissions = 20 # /usr/include/X11/extensions/xf86vmode.h:4906
CLKFLAG_PROGRAMABLE = 1 # /usr/include/X11/extensions/xf86vmode.h:4908
XF86VidModeNumberEvents = 0 # /usr/include/X11/extensions/xf86vmode.h:4919
XF86VidModeBadClock = 0 # /usr/include/X11/extensions/xf86vmode.h:4922
XF86VidModeBadHTimings = 1 # /usr/include/X11/extensions/xf86vmode.h:4923
XF86VidModeBadVTimings = 2 # /usr/include/X11/extensions/xf86vmode.h:4924
XF86VidModeModeUnsuitable = 3 # /usr/include/X11/extensions/xf86vmode.h:4925
XF86VidModeExtensionDisabled = 4 # /usr/include/X11/extensions/xf86vmode.h:4926
XF86VidModeClientNotLocal = 5 # /usr/include/X11/extensions/xf86vmode.h:4927
XF86VidModeZoomLocked = 6 # /usr/include/X11/extensions/xf86vmode.h:4928
XF86VidModeNumberErrors = 7 # /usr/include/X11/extensions/xf86vmode.h:4929
XF86VM_READ_PERMISSION = 1 # /usr/include/X11/extensions/xf86vmode.h:4931
XF86VM_WRITE_PERMISSION = 2 # /usr/include/X11/extensions/xf86vmode.h:4932
class struct_anon_93(Structure):
__slots__ = [
'hdisplay',
'hsyncstart',
'hsyncend',
'htotal',
'hskew',
'vdisplay',
'vsyncstart',
'vsyncend',
'vtotal',
'flags',
'privsize',
'private',
]
INT32 = c_int # /usr/include/X11/Xmd.h:135
struct_anon_93._fields_ = [
('hdisplay', c_ushort),
('hsyncstart', c_ushort),
('hsyncend', c_ushort),
('htotal', c_ushort),
('hskew', c_ushort),
('vdisplay', c_ushort),
('vsyncstart', c_ushort),
('vsyncend', c_ushort),
('vtotal', c_ushort),
('flags', c_uint),
('privsize', c_int),
('private', POINTER(INT32)),
]
XF86VidModeModeLine = struct_anon_93 # /usr/include/X11/extensions/xf86vmode.h:4954
class struct_anon_94(Structure):
__slots__ = [
'dotclock',
'hdisplay',
'hsyncstart',
'hsyncend',
'htotal',
'hskew',
'vdisplay',
'vsyncstart',
'vsyncend',
'vtotal',
'flags',
'privsize',
'private',
]
struct_anon_94._fields_ = [
('dotclock', c_uint),
('hdisplay', c_ushort),
('hsyncstart', c_ushort),
('hsyncend', c_ushort),
('htotal', c_ushort),
('hskew', c_ushort),
('vdisplay', c_ushort),
('vsyncstart', c_ushort),
('vsyncend', c_ushort),
('vtotal', c_ushort),
('flags', c_uint),
('privsize', c_int),
('private', POINTER(INT32)),
]
XF86VidModeModeInfo = struct_anon_94 # /usr/include/X11/extensions/xf86vmode.h:4975
class struct_anon_95(Structure):
__slots__ = [
'hi',
'lo',
]
struct_anon_95._fields_ = [
('hi', c_float),
('lo', c_float),
]
XF86VidModeSyncRange = struct_anon_95 # /usr/include/X11/extensions/xf86vmode.h:4980
class struct_anon_96(Structure):
__slots__ = [
'vendor',
'model',
'EMPTY',
'nhsync',
'hsync',
'nvsync',
'vsync',
]
struct_anon_96._fields_ = [
('vendor', c_char_p),
('model', c_char_p),
('EMPTY', c_float),
('nhsync', c_ubyte),
('hsync', POINTER(XF86VidModeSyncRange)),
('nvsync', c_ubyte),
('vsync', POINTER(XF86VidModeSyncRange)),
]
XF86VidModeMonitor = struct_anon_96 # /usr/include/X11/extensions/xf86vmode.h:4990
class struct_anon_97(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'root',
'state',
'kind',
'forced',
'time',
]
Display = pyglet.libs.x11.xlib.Display
Window = pyglet.libs.x11.xlib.Window
Time = pyglet.libs.x11.xlib.Time
struct_anon_97._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('root', Window),
('state', c_int),
('kind', c_int),
('forced', c_int),
('time', Time),
]
XF86VidModeNotifyEvent = struct_anon_97 # /usr/include/X11/extensions/xf86vmode.h:5002
class struct_anon_98(Structure):
__slots__ = [
'red',
'green',
'blue',
]
struct_anon_98._fields_ = [
('red', c_float),
('green', c_float),
('blue', c_float),
]
XF86VidModeGamma = struct_anon_98 # /usr/include/X11/extensions/xf86vmode.h:5008
# /usr/include/X11/extensions/xf86vmode.h:5018
XF86VidModeQueryVersion = _lib.XF86VidModeQueryVersion
XF86VidModeQueryVersion.restype = c_int
XF86VidModeQueryVersion.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5024
XF86VidModeQueryExtension = _lib.XF86VidModeQueryExtension
XF86VidModeQueryExtension.restype = c_int
XF86VidModeQueryExtension.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5030
XF86VidModeSetClientVersion = _lib.XF86VidModeSetClientVersion
XF86VidModeSetClientVersion.restype = c_int
XF86VidModeSetClientVersion.argtypes = [POINTER(Display)]
# /usr/include/X11/extensions/xf86vmode.h:5034
XF86VidModeGetModeLine = _lib.XF86VidModeGetModeLine
XF86VidModeGetModeLine.restype = c_int
XF86VidModeGetModeLine.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(XF86VidModeModeLine)]
# /usr/include/X11/extensions/xf86vmode.h:5041
XF86VidModeGetAllModeLines = _lib.XF86VidModeGetAllModeLines
XF86VidModeGetAllModeLines.restype = c_int
XF86VidModeGetAllModeLines.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(POINTER(POINTER(XF86VidModeModeInfo)))]
# /usr/include/X11/extensions/xf86vmode.h:5048
XF86VidModeAddModeLine = _lib.XF86VidModeAddModeLine
XF86VidModeAddModeLine.restype = c_int
XF86VidModeAddModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo), POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5055
XF86VidModeDeleteModeLine = _lib.XF86VidModeDeleteModeLine
XF86VidModeDeleteModeLine.restype = c_int
XF86VidModeDeleteModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5061
XF86VidModeModModeLine = _lib.XF86VidModeModModeLine
XF86VidModeModModeLine.restype = c_int
XF86VidModeModModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeLine)]
# /usr/include/X11/extensions/xf86vmode.h:5067
XF86VidModeValidateModeLine = _lib.XF86VidModeValidateModeLine
XF86VidModeValidateModeLine.restype = c_int
XF86VidModeValidateModeLine.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5073
XF86VidModeSwitchMode = _lib.XF86VidModeSwitchMode
XF86VidModeSwitchMode.restype = c_int
XF86VidModeSwitchMode.argtypes = [POINTER(Display), c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5079
XF86VidModeSwitchToMode = _lib.XF86VidModeSwitchToMode
XF86VidModeSwitchToMode.restype = c_int
XF86VidModeSwitchToMode.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeModeInfo)]
# /usr/include/X11/extensions/xf86vmode.h:5085
XF86VidModeLockModeSwitch = _lib.XF86VidModeLockModeSwitch
XF86VidModeLockModeSwitch.restype = c_int
XF86VidModeLockModeSwitch.argtypes = [POINTER(Display), c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5091
XF86VidModeGetMonitor = _lib.XF86VidModeGetMonitor
XF86VidModeGetMonitor.restype = c_int
XF86VidModeGetMonitor.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeMonitor)]
# /usr/include/X11/extensions/xf86vmode.h:5097
XF86VidModeGetViewPort = _lib.XF86VidModeGetViewPort
XF86VidModeGetViewPort.restype = c_int
XF86VidModeGetViewPort.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5104
XF86VidModeSetViewPort = _lib.XF86VidModeSetViewPort
XF86VidModeSetViewPort.restype = c_int
XF86VidModeSetViewPort.argtypes = [POINTER(Display), c_int, c_int, c_int]
# /usr/include/X11/extensions/xf86vmode.h:5111
XF86VidModeGetDotClocks = _lib.XF86VidModeGetDotClocks
XF86VidModeGetDotClocks.restype = c_int
XF86VidModeGetDotClocks.argtypes = [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(POINTER(c_int))]
# /usr/include/X11/extensions/xf86vmode.h:5120
XF86VidModeGetGamma = _lib.XF86VidModeGetGamma
XF86VidModeGetGamma.restype = c_int
XF86VidModeGetGamma.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeGamma)]
# /usr/include/X11/extensions/xf86vmode.h:5126
XF86VidModeSetGamma = _lib.XF86VidModeSetGamma
XF86VidModeSetGamma.restype = c_int
XF86VidModeSetGamma.argtypes = [POINTER(Display), c_int, POINTER(XF86VidModeGamma)]
# /usr/include/X11/extensions/xf86vmode.h:5132
XF86VidModeSetGammaRamp = _lib.XF86VidModeSetGammaRamp
XF86VidModeSetGammaRamp.restype = c_int
XF86VidModeSetGammaRamp.argtypes = [POINTER(Display), c_int, c_int, POINTER(c_ushort), POINTER(c_ushort), POINTER(c_ushort)]
# /usr/include/X11/extensions/xf86vmode.h:5141
XF86VidModeGetGammaRamp = _lib.XF86VidModeGetGammaRamp
XF86VidModeGetGammaRamp.restype = c_int
XF86VidModeGetGammaRamp.argtypes = [POINTER(Display), c_int, c_int, POINTER(c_ushort), POINTER(c_ushort), POINTER(c_ushort)]
# /usr/include/X11/extensions/xf86vmode.h:5150
XF86VidModeGetGammaRampSize = _lib.XF86VidModeGetGammaRampSize
XF86VidModeGetGammaRampSize.restype = c_int
XF86VidModeGetGammaRampSize.argtypes = [POINTER(Display), c_int, POINTER(c_int)]
# /usr/include/X11/extensions/xf86vmode.h:5156
XF86VidModeGetPermissions = _lib.XF86VidModeGetPermissions
XF86VidModeGetPermissions.restype = c_int
XF86VidModeGetPermissions.argtypes = [POINTER(Display), c_int, POINTER(c_int)]
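# Usage sketch (hypothetical; assumes a running X server and the sibling
# xlib wrapper):
#   from pyglet.libs.x11.xlib import XOpenDisplay
#   display = XOpenDisplay(None)
#   major, minor = c_int(), c_int()
#   if XF86VidModeQueryVersion(display, byref(major), byref(minor)):
#       print(major.value, minor.value)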
__all__ = ['X_XF86VidModeQueryVersion', 'X_XF86VidModeGetModeLine',
'X_XF86VidModeModModeLine', 'X_XF86VidModeSwitchMode',
'X_XF86VidModeGetMonitor', 'X_XF86VidModeLockModeSwitch',
'X_XF86VidModeGetAllModeLines', 'X_XF86VidModeAddModeLine',
'X_XF86VidModeDeleteModeLine', 'X_XF86VidModeValidateModeLine',
'X_XF86VidModeSwitchToMode', 'X_XF86VidModeGetViewPort',
'X_XF86VidModeSetViewPort', 'X_XF86VidModeGetDotClocks',
'X_XF86VidModeSetClientVersion', 'X_XF86VidModeSetGamma',
'X_XF86VidModeGetGamma', 'X_XF86VidModeGetGammaRamp',
'X_XF86VidModeSetGammaRamp', 'X_XF86VidModeGetGammaRampSize',
'X_XF86VidModeGetPermissions', 'CLKFLAG_PROGRAMABLE',
'XF86VidModeNumberEvents', 'XF86VidModeBadClock', 'XF86VidModeBadHTimings',
'XF86VidModeBadVTimings', 'XF86VidModeModeUnsuitable',
'XF86VidModeExtensionDisabled', 'XF86VidModeClientNotLocal',
'XF86VidModeZoomLocked', 'XF86VidModeNumberErrors', 'XF86VM_READ_PERMISSION',
'XF86VM_WRITE_PERMISSION', 'XF86VidModeModeLine', 'XF86VidModeModeInfo',
'XF86VidModeSyncRange', 'XF86VidModeMonitor', 'XF86VidModeNotifyEvent',
'XF86VidModeGamma', 'XF86VidModeQueryVersion', 'XF86VidModeQueryExtension',
'XF86VidModeSetClientVersion', 'XF86VidModeGetModeLine',
'XF86VidModeGetAllModeLines', 'XF86VidModeAddModeLine',
'XF86VidModeDeleteModeLine', 'XF86VidModeModModeLine',
'XF86VidModeValidateModeLine', 'XF86VidModeSwitchMode',
'XF86VidModeSwitchToMode', 'XF86VidModeLockModeSwitch',
'XF86VidModeGetMonitor', 'XF86VidModeGetViewPort', 'XF86VidModeSetViewPort',
'XF86VidModeGetDotClocks', 'XF86VidModeGetGamma', 'XF86VidModeSetGamma',
'XF86VidModeSetGammaRamp', 'XF86VidModeGetGammaRamp',
'XF86VidModeGetGammaRampSize', 'XF86VidModeGetPermissions']
|
gfneto/bitcoin-abe | refs/heads/master | build/lib.linux-x86_64-2.7/Abe/DataStore.py | 6 | # Copyright(C) 2011,2012,2013,2014 by Abe developers.
# DataStore.py: back end database access for Abe.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
# This module combines three functions that might be better split up:
# 1. Abe's schema
# 2. Abstraction over the schema for importing blocks, etc.
# 3. Code to load data by scanning blockfiles or using JSON-RPC.
import os
import re
import errno
import logging
import SqlAbstraction
import Chain
# bitcointools -- modified deserialize.py to return raw transaction
import BCDataStream
import deserialize
import util
import base58
SCHEMA_TYPE = "Abe"
SCHEMA_VERSION = SCHEMA_TYPE + "39"
CONFIG_DEFAULTS = {
"dbtype": None,
"connect_args": None,
"binary_type": None,
"int_type": None,
"upgrade": None,
"rescan": None,
"commit_bytes": None,
"log_sql": None,
"log_rpc": None,
"datadir": None,
"ignore_bit8_chains": None,
"use_firstbits": False,
"keep_scriptsig": True,
"import_tx": [],
"default_loader": "default",
}
WORK_BITS = 304 # XXX more than necessary.
CHAIN_CONFIG = [
{"chain":"Bitcoin"},
{"chain":"Testnet"},
{"chain":"Namecoin"},
{"chain":"Weeds", "policy":"Sha256Chain",
"code3":"WDS", "address_version":"\xf3", "magic":"\xf8\xbf\xb5\xda"},
{"chain":"BeerTokens", "policy":"Sha256Chain",
"code3":"BER", "address_version":"\xf2", "magic":"\xf7\xbf\xb5\xdb"},
{"chain":"SolidCoin", "policy":"Sha256Chain",
"code3":"SCN", "address_version":"\x7d", "magic":"\xde\xad\xba\xbe"},
{"chain":"ScTestnet", "policy":"Sha256Chain",
"code3":"SC0", "address_version":"\x6f", "magic":"\xca\xfe\xba\xbe"},
{"chain":"Worldcoin", "policy":"Sha256Chain",
"code3":"WDC", "address_version":"\x49", "magic":"\xfb\xc0\xb6\xdb"},
{"chain":"NovaCoin"},
{"chain":"CryptoCash"},
{"chain":"Anoncoin", "policy":"Sha256Chain",
"code3":"ANC", "address_version":"\x17", "magic":"\xFA\xCA\xBA\xDA" },
{"chain":"Hirocoin"},
{"chain":"Bitleu"},
{"chain":"Maxcoin"},
{"chain":"Dash"},
{"chain":"BlackCoin"},
{"chain":"Unbreakablecoin"},
#{"chain":"",
# "code3":"", "address_version":"\x", "magic":""},
]
NULL_PUBKEY_HASH = "\0" * Chain.PUBKEY_HASH_LENGTH
NULL_PUBKEY_ID = 0
PUBKEY_ID_NETWORK_FEE = NULL_PUBKEY_ID
# Size of the script and pubkey columns in bytes.
MAX_SCRIPT = 1000000
MAX_PUBKEY = 65
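# For reference, an uncompressed secp256k1 public key is 65 bytes
# (a 0x04 prefix plus two 32-byte coordinates), so MAX_PUBKEY = 65 also
# accommodates 33-byte compressed keys.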
NO_CLOB = 'BUG_NO_CLOB'
# XXX This belongs in another module.
class InvalidBlock(Exception):
pass
class MerkleRootMismatch(InvalidBlock):
def __init__(ex, block_hash, tx_hashes):
ex.block_hash = block_hash
ex.tx_hashes = tx_hashes
def __str__(ex):
return 'Block header Merkle root does not match its transactions. ' \
'block hash=%s' % (ex.block_hash[::-1].encode('hex'),)
class MalformedHash(ValueError):
pass
class MalformedAddress(ValueError):
pass
class DataStore(object):
"""
Bitcoin data storage class based on DB-API 2 and standard SQL with
workarounds to support SQLite3, PostgreSQL/psycopg2, MySQL,
Oracle, ODBC, and IBM DB2.
"""
def __init__(store, args):
"""
Open and store a connection to the SQL database.
args.dbtype should name a DB-API 2 driver module, e.g.,
"sqlite3".
args.connect_args should be an argument to the module's
connect() method, or None for no argument, or a list of
arguments, or a dictionary of named arguments.
args.datadir names Bitcoin data directories containing
blk0001.dat to scan for new blocks.
"""
if args.datadir is None:
args.datadir = util.determine_db_dir()
if isinstance(args.datadir, str):
args.datadir = [args.datadir]
store.args = args
store.log = logging.getLogger(__name__)
store.rpclog = logging.getLogger(__name__ + ".rpc")
if not args.log_rpc:
store.rpclog.setLevel(logging.ERROR)
if args.dbtype is None:
store.log.warn("dbtype not configured, see abe.conf for examples");
store.dbmodule = None
store.config = CONFIG_DEFAULTS.copy()
store.datadirs = []
store.use_firstbits = CONFIG_DEFAULTS['use_firstbits']
store._sql = None
return
store.dbmodule = __import__(args.dbtype)
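        # A bare lambda serves as a cheap attribute container (an ad-hoc
        # namespace) for the SqlAbstraction constructor arguments.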
sql_args = lambda: 1
sql_args.module = store.dbmodule
sql_args.connect_args = args.connect_args
sql_args.binary_type = args.binary_type
sql_args.int_type = args.int_type
sql_args.log_sql = args.log_sql
sql_args.prefix = "abe_"
sql_args.config = {}
store.sql_args = sql_args
store.set_db(None)
store.init_sql()
store._blocks = {}
# Read the CONFIG and CONFIGVAR tables if present.
store.config = store._read_config()
if store.config is None:
store.keep_scriptsig = args.keep_scriptsig
elif 'keep_scriptsig' in store.config:
store.keep_scriptsig = store.config.get('keep_scriptsig') == "true"
else:
store.keep_scriptsig = CONFIG_DEFAULTS['keep_scriptsig']
store.refresh_ddl()
if store.config is None:
store.initialize()
else:
store.init_sql()
if store.config['schema_version'] == SCHEMA_VERSION:
pass
elif args.upgrade:
import upgrade
upgrade.upgrade_schema(store)
else:
raise Exception(
"Database schema version (%s) does not match software"
" (%s). Please run with --upgrade to convert database."
% (store.config['schema_version'], SCHEMA_VERSION))
store._sql.auto_reconnect = True
if args.rescan:
store.sql("UPDATE datadir SET blkfile_number=1, blkfile_offset=0")
store._init_datadirs()
store.init_chains()
store.commit_bytes = args.commit_bytes
if store.commit_bytes is None:
store.commit_bytes = 0 # Commit whenever possible.
else:
store.commit_bytes = int(store.commit_bytes)
store.bytes_since_commit = 0
store.use_firstbits = (store.config['use_firstbits'] == "true")
for hex_tx in args.import_tx:
chain_name = None
if isinstance(hex_tx, dict):
chain_name = hex_tx.get("chain")
hex_tx = hex_tx.get("tx")
store.maybe_import_binary_tx(chain_name, str(hex_tx).decode('hex'))
store.default_loader = args.default_loader
store.commit()
def set_db(store, db):
store._sql = db
def get_db(store):
return store._sql
def connect(store):
return store._sql.connect()
def reconnect(store):
return store._sql.reconnect()
def close(store):
store._sql.close()
def commit(store):
store._sql.commit()
def rollback(store):
if store._sql is not None:
store._sql.rollback()
def sql(store, stmt, params=()):
store._sql.sql(stmt, params)
def ddl(store, stmt):
store._sql.ddl(stmt)
def selectrow(store, stmt, params=()):
return store._sql.selectrow(stmt, params)
def selectall(store, stmt, params=()):
return store._sql.selectall(stmt, params)
def rowcount(store):
return store._sql.rowcount()
def create_sequence(store, key):
store._sql.create_sequence(key)
def drop_sequence(store, key):
store._sql.drop_sequence(key)
def new_id(store, key):
return store._sql.new_id(key)
def init_sql(store):
sql_args = store.sql_args
if hasattr(store, 'config'):
for name in store.config.keys():
if name.startswith('sql.'):
sql_args.config[name[len('sql.'):]] = store.config[name]
if store._sql:
store._sql.close() # XXX Could just set_flavour.
store.set_db(SqlAbstraction.SqlAbstraction(sql_args))
store.init_binfuncs()
def init_binfuncs(store):
store.binin = store._sql.binin
store.binin_hex = store._sql.binin_hex
store.binin_int = store._sql.binin_int
store.binout = store._sql.binout
store.binout_hex = store._sql.binout_hex
store.binout_int = store._sql.binout_int
store.intin = store._sql.intin
store.hashin = store._sql.revin
store.hashin_hex = store._sql.revin_hex
store.hashout = store._sql.revout
store.hashout_hex = store._sql.revout_hex
def _read_config(store):
# Read table CONFIGVAR if it exists.
config = {}
try:
for name, value in store.selectall("""
SELECT configvar_name, configvar_value
FROM configvar"""):
config[name] = '' if value is None else value
if config:
return config
except store.dbmodule.DatabaseError:
try:
store.rollback()
except Exception:
pass
# Read legacy table CONFIG if it exists.
try:
row = store.selectrow("""
SELECT schema_version, binary_type
FROM config
WHERE config_id = 1""")
sv, btype = row
return { 'schema_version': sv, 'binary_type': btype }
except Exception:
try:
store.rollback()
except Exception:
pass
# Return None to indicate no schema found.
return None
def _init_datadirs(store):
"""Parse store.args.datadir, create store.datadirs."""
if store.args.datadir == []:
store.datadirs = []
return
datadirs = {}
for row in store.selectall("""
SELECT datadir_id, dirname, blkfile_number, blkfile_offset,
chain_id
FROM datadir"""):
id, dir, num, offs, chain_id = row
datadirs[dir] = {
"id": id,
"dirname": dir,
"blkfile_number": int(num),
"blkfile_offset": int(offs),
"chain_id": None if chain_id is None else int(chain_id),
"loader": None}
#print("datadirs: %r" % datadirs)
# By default, scan every dir we know. This doesn't happen in
        # practice, because abe.py sets ~/.bitcoin as default datadir.
if store.args.datadir is None:
store.datadirs = datadirs.values()
return
def lookup_chain_id(name):
row = store.selectrow(
"SELECT chain_id FROM chain WHERE chain_name = ?",
(name,))
return None if row is None else int(row[0])
store.datadirs = []
for dircfg in store.args.datadir:
loader = None
conf = None
if isinstance(dircfg, dict):
#print("dircfg is dict: %r" % dircfg) # XXX
dirname = dircfg.get('dirname')
if dirname is None:
raise ValueError(
'Missing dirname in datadir configuration: '
+ str(dircfg))
if dirname in datadirs:
d = datadirs[dirname]
d['loader'] = dircfg.get('loader')
d['conf'] = dircfg.get('conf')
if d['chain_id'] is None and 'chain' in dircfg:
d['chain_id'] = lookup_chain_id(dircfg['chain'])
store.datadirs.append(d)
continue
loader = dircfg.get('loader')
conf = dircfg.get('conf')
chain_id = dircfg.get('chain_id')
if chain_id is None:
chain_name = dircfg.get('chain')
chain_id = lookup_chain_id(chain_name)
if chain_id is None and chain_name is not None:
chain_id = store.new_id('chain')
code3 = dircfg.get('code3')
if code3 is None:
# XXX Should default via policy.
code3 = '000' if chain_id > 999 else "%03d" % (
chain_id,)
addr_vers = dircfg.get('address_version')
if addr_vers is None:
addr_vers = "\0"
elif isinstance(addr_vers, unicode):
addr_vers = addr_vers.encode('latin_1')
script_addr_vers = dircfg.get('script_addr_vers')
if script_addr_vers is None:
script_addr_vers = "\x05"
elif isinstance(script_addr_vers, unicode):
script_addr_vers = script_addr_vers.encode('latin_1')
decimals = dircfg.get('decimals')
if decimals is not None:
decimals = int(decimals)
# XXX Could do chain_magic, but this datadir won't
# use it, because it knows its chain.
store.sql("""
INSERT INTO chain (
chain_id, chain_name, chain_code3,
chain_address_version, chain_script_addr_vers, chain_policy,
chain_decimals
) VALUES (?, ?, ?, ?, ?, ?, ?)""",
(chain_id, chain_name, code3,
store.binin(addr_vers), store.binin(script_addr_vers),
dircfg.get('policy', chain_name), decimals))
store.commit()
store.log.warning("Assigned chain_id %d to %s",
chain_id, chain_name)
elif dircfg in datadirs:
store.datadirs.append(datadirs[dircfg])
continue
else:
# Not a dict. A string naming a directory holding
# standard chains.
dirname = dircfg
chain_id = None
d = {
"id": store.new_id("datadir"),
"dirname": dirname,
"blkfile_number": 1,
"blkfile_offset": 0,
"chain_id": chain_id,
"loader": loader,
"conf": conf,
}
store.datadirs.append(d)
def init_chains(store):
store.chains_by = lambda: 0
store.chains_by.id = {}
store.chains_by.name = {}
store.chains_by.magic = {}
# Legacy config option.
no_bit8_chains = store.args.ignore_bit8_chains or []
if isinstance(no_bit8_chains, str):
no_bit8_chains = [no_bit8_chains]
for chain_id, magic, chain_name, chain_code3, address_version, script_addr_vers, \
chain_policy, chain_decimals in \
store.selectall("""
SELECT chain_id, chain_magic, chain_name, chain_code3,
chain_address_version, chain_script_addr_vers, chain_policy, chain_decimals
FROM chain
"""):
chain = Chain.create(
id = int(chain_id),
magic = store.binout(magic),
name = unicode(chain_name),
code3 = chain_code3 and unicode(chain_code3),
address_version = store.binout(address_version),
script_addr_vers = store.binout(script_addr_vers),
policy = unicode(chain_policy),
decimals = None if chain_decimals is None else \
int(chain_decimals))
# Legacy config option.
if chain.name in no_bit8_chains and \
chain.has_feature('block_version_bit8_merge_mine'):
chain = Chain.create(src=chain, policy="LegacyNoBit8")
store.chains_by.id[chain.id] = chain
store.chains_by.name[chain.name] = chain
store.chains_by.magic[bytes(chain.magic)] = chain
def get_chain_by_id(store, chain_id):
return store.chains_by.id[int(chain_id)]
def get_chain_by_name(store, name):
return store.chains_by.name.get(name, None)
def get_default_chain(store):
store.log.debug("Falling back to default (Bitcoin) policy.")
return Chain.create(None)
def get_ddl(store, key):
return store._ddl[key]
def refresh_ddl(store):
store._ddl = {
"chain_summary":
# XXX I could do a lot with MATERIALIZED views.
"""CREATE VIEW chain_summary AS SELECT
cc.chain_id,
cc.in_longest,
b.block_id,
b.block_hash,
b.block_version,
b.block_hashMerkleRoot,
b.block_nTime,
b.block_nBits,
b.block_nNonce,
cc.block_height,
b.prev_block_id,
prev.block_hash prev_block_hash,
b.block_chain_work,
b.block_num_tx,
b.block_value_in,
b.block_value_out,
b.block_total_satoshis,
b.block_total_seconds,
b.block_satoshi_seconds,
b.block_total_ss,
b.block_ss_destroyed
FROM chain_candidate cc
JOIN block b ON (cc.block_id = b.block_id)
LEFT JOIN block prev ON (b.prev_block_id = prev.block_id)""",
"txout_detail":
"""CREATE VIEW txout_detail AS SELECT
cc.chain_id,
cc.in_longest,
cc.block_id,
b.block_hash,
b.block_height,
block_tx.tx_pos,
tx.tx_id,
tx.tx_hash,
tx.tx_lockTime,
tx.tx_version,
tx.tx_size,
txout.txout_id,
txout.txout_pos,
txout.txout_value,
txout.txout_scriptPubKey,
pubkey.pubkey_id,
pubkey.pubkey_hash,
pubkey.pubkey
FROM chain_candidate cc
JOIN block b ON (cc.block_id = b.block_id)
JOIN block_tx ON (b.block_id = block_tx.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txout ON (tx.tx_id = txout.tx_id)
LEFT JOIN pubkey ON (txout.pubkey_id = pubkey.pubkey_id)""",
"txin_detail":
"""CREATE VIEW txin_detail AS SELECT
cc.chain_id,
cc.in_longest,
cc.block_id,
b.block_hash,
b.block_height,
block_tx.tx_pos,
tx.tx_id,
tx.tx_hash,
tx.tx_lockTime,
tx.tx_version,
tx.tx_size,
txin.txin_id,
txin.txin_pos,
txin.txout_id prevout_id""" + (""",
txin.txin_scriptSig,
txin.txin_sequence""" if store.keep_scriptsig else """,
NULL txin_scriptSig,
NULL txin_sequence""") + """,
prevout.txout_value txin_value,
prevout.txout_scriptPubKey txin_scriptPubKey,
pubkey.pubkey_id,
pubkey.pubkey_hash,
pubkey.pubkey
FROM chain_candidate cc
JOIN block b ON (cc.block_id = b.block_id)
JOIN block_tx ON (b.block_id = block_tx.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txin ON (tx.tx_id = txin.tx_id)
LEFT JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
LEFT JOIN pubkey
ON (prevout.pubkey_id = pubkey.pubkey_id)""",
"txout_approx":
            # View of txout for drivers like sqlite3 that cannot handle large
# integer arithmetic. For them, we transform the definition of
# txout_approx_value to DOUBLE PRECISION (approximate) by a CAST.
"""CREATE VIEW txout_approx AS SELECT
txout_id,
tx_id,
txout_value txout_approx_value
FROM txout""",
"configvar":
# ABE accounting. This table is read without knowledge of the
# database's SQL quirks, so it must use only the most widely supported
# features.
"""CREATE TABLE configvar (
configvar_name VARCHAR(100) NOT NULL PRIMARY KEY,
configvar_value VARCHAR(255)
)""",
"abe_sequences":
"""CREATE TABLE abe_sequences (
sequence_key VARCHAR(100) NOT NULL PRIMARY KEY,
nextid NUMERIC(30)
)""",
}
def initialize(store):
"""
Create the database schema.
"""
store.config = {}
store.configure()
for stmt in (
store._ddl['configvar'],
"""CREATE TABLE datadir (
datadir_id NUMERIC(10) NOT NULL PRIMARY KEY,
dirname VARCHAR(2000) NOT NULL,
blkfile_number NUMERIC(8) NULL,
blkfile_offset NUMERIC(20) NULL,
chain_id NUMERIC(10) NULL
)""",
# A block of the type used by Bitcoin.
"""CREATE TABLE block (
block_id NUMERIC(14) NOT NULL PRIMARY KEY,
block_hash BINARY(32) UNIQUE NOT NULL,
block_version NUMERIC(10),
block_hashMerkleRoot BINARY(32),
block_nTime NUMERIC(20),
block_nBits NUMERIC(10),
block_nNonce NUMERIC(10),
block_height NUMERIC(14) NULL,
prev_block_id NUMERIC(14) NULL,
search_block_id NUMERIC(14) NULL,
block_chain_work BINARY(""" + str(WORK_BITS / 8) + """),
block_value_in NUMERIC(30) NULL,
block_value_out NUMERIC(30),
block_total_satoshis NUMERIC(26) NULL,
block_total_seconds NUMERIC(20) NULL,
block_satoshi_seconds NUMERIC(28) NULL,
block_total_ss NUMERIC(28) NULL,
block_num_tx NUMERIC(10) NOT NULL,
block_ss_destroyed NUMERIC(28) NULL,
FOREIGN KEY (prev_block_id)
REFERENCES block (block_id),
FOREIGN KEY (search_block_id)
REFERENCES block (block_id)
)""",
# CHAIN comprises a magic number, a policy, and (indirectly via
# CHAIN_LAST_BLOCK_ID and the referenced block's ancestors) a genesis
# block, possibly null. A chain may have a currency code.
"""CREATE TABLE chain (
chain_id NUMERIC(10) NOT NULL PRIMARY KEY,
chain_name VARCHAR(100) UNIQUE NOT NULL,
chain_code3 VARCHAR(5) NULL,
chain_address_version VARBINARY(100) NOT NULL,
chain_script_addr_vers VARBINARY(100) NULL,
chain_magic BINARY(4) NULL,
chain_policy VARCHAR(255) NOT NULL,
chain_decimals NUMERIC(2) NULL,
chain_last_block_id NUMERIC(14) NULL,
FOREIGN KEY (chain_last_block_id)
REFERENCES block (block_id)
)""",
# CHAIN_CANDIDATE lists blocks that are, or might become, part of the
# given chain. IN_LONGEST is 1 when the block is in the chain, else 0.
# IN_LONGEST denormalizes information stored canonically in
# CHAIN.CHAIN_LAST_BLOCK_ID and BLOCK.PREV_BLOCK_ID.
"""CREATE TABLE chain_candidate (
chain_id NUMERIC(10) NOT NULL,
block_id NUMERIC(14) NOT NULL,
in_longest NUMERIC(1),
block_height NUMERIC(14),
PRIMARY KEY (chain_id, block_id),
FOREIGN KEY (block_id) REFERENCES block (block_id)
)""",
"""CREATE INDEX x_cc_block ON chain_candidate (block_id)""",
"""CREATE INDEX x_cc_chain_block_height
ON chain_candidate (chain_id, block_height)""",
"""CREATE INDEX x_cc_block_height ON chain_candidate (block_height)""",
# An orphan block must remember its hashPrev.
"""CREATE TABLE orphan_block (
block_id NUMERIC(14) NOT NULL PRIMARY KEY,
block_hashPrev BINARY(32) NOT NULL,
FOREIGN KEY (block_id) REFERENCES block (block_id)
)""",
"""CREATE INDEX x_orphan_block_hashPrev ON orphan_block (block_hashPrev)""",
# Denormalize the relationship inverse to BLOCK.PREV_BLOCK_ID.
"""CREATE TABLE block_next (
block_id NUMERIC(14) NOT NULL,
next_block_id NUMERIC(14) NOT NULL,
PRIMARY KEY (block_id, next_block_id),
FOREIGN KEY (block_id) REFERENCES block (block_id),
FOREIGN KEY (next_block_id) REFERENCES block (block_id)
)""",
# A transaction of the type used by Bitcoin.
"""CREATE TABLE tx (
tx_id NUMERIC(26) NOT NULL PRIMARY KEY,
tx_hash BINARY(32) UNIQUE NOT NULL,
tx_version NUMERIC(10),
tx_lockTime NUMERIC(10),
tx_size NUMERIC(10)
)""",
# Presence of transactions in blocks is many-to-many.
"""CREATE TABLE block_tx (
block_id NUMERIC(14) NOT NULL,
tx_id NUMERIC(26) NOT NULL,
tx_pos NUMERIC(10) NOT NULL,
PRIMARY KEY (block_id, tx_id),
UNIQUE (block_id, tx_pos),
FOREIGN KEY (block_id)
REFERENCES block (block_id),
FOREIGN KEY (tx_id)
REFERENCES tx (tx_id)
)""",
"""CREATE INDEX x_block_tx_tx ON block_tx (tx_id)""",
# A public key for sending bitcoins. PUBKEY_HASH is derivable from a
# Bitcoin or Testnet address.
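            # (Illustratively, pubkey_hash is the standard hash160,
            # RIPEMD160(SHA256(pubkey)), 20 bytes long.)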
"""CREATE TABLE pubkey (
pubkey_id NUMERIC(26) NOT NULL PRIMARY KEY,
pubkey_hash BINARY(20) UNIQUE NOT NULL,
pubkey VARBINARY(""" + str(MAX_PUBKEY) + """) NULL
)""",
"""CREATE TABLE multisig_pubkey (
multisig_id NUMERIC(26) NOT NULL,
pubkey_id NUMERIC(26) NOT NULL,
PRIMARY KEY (multisig_id, pubkey_id),
FOREIGN KEY (multisig_id) REFERENCES pubkey (pubkey_id),
FOREIGN KEY (pubkey_id) REFERENCES pubkey (pubkey_id)
)""",
"""CREATE INDEX x_multisig_pubkey_pubkey ON multisig_pubkey (pubkey_id)""",
# A transaction out-point.
"""CREATE TABLE txout (
txout_id NUMERIC(26) NOT NULL PRIMARY KEY,
tx_id NUMERIC(26) NOT NULL,
txout_pos NUMERIC(10) NOT NULL,
txout_value NUMERIC(30) NOT NULL,
txout_scriptPubKey VARBINARY(""" + str(MAX_SCRIPT) + """),
pubkey_id NUMERIC(26),
UNIQUE (tx_id, txout_pos),
FOREIGN KEY (pubkey_id)
REFERENCES pubkey (pubkey_id)
)""",
"""CREATE INDEX x_txout_pubkey ON txout (pubkey_id)""",
# A transaction in-point.
"""CREATE TABLE txin (
txin_id NUMERIC(26) NOT NULL PRIMARY KEY,
tx_id NUMERIC(26) NOT NULL,
txin_pos NUMERIC(10) NOT NULL,
txout_id NUMERIC(26)""" + (""",
txin_scriptSig VARBINARY(""" + str(MAX_SCRIPT) + """),
txin_sequence NUMERIC(10)""" if store.keep_scriptsig else "") + """,
UNIQUE (tx_id, txin_pos),
FOREIGN KEY (tx_id)
REFERENCES tx (tx_id)
)""",
"""CREATE INDEX x_txin_txout ON txin (txout_id)""",
            # While TXIN.TXOUT_ID is not yet known (the referenced output has
            # not been seen), we must remember TXOUT_POS, a.k.a. PREVOUT_N.
"""CREATE TABLE unlinked_txin (
txin_id NUMERIC(26) NOT NULL PRIMARY KEY,
txout_tx_hash BINARY(32) NOT NULL,
txout_pos NUMERIC(10) NOT NULL,
FOREIGN KEY (txin_id) REFERENCES txin (txin_id)
)""",
"""CREATE INDEX x_unlinked_txin_outpoint
ON unlinked_txin (txout_tx_hash, txout_pos)""",
"""CREATE TABLE block_txin (
block_id NUMERIC(14) NOT NULL,
txin_id NUMERIC(26) NOT NULL,
out_block_id NUMERIC(14) NOT NULL,
PRIMARY KEY (block_id, txin_id),
FOREIGN KEY (block_id) REFERENCES block (block_id),
FOREIGN KEY (txin_id) REFERENCES txin (txin_id),
FOREIGN KEY (out_block_id) REFERENCES block (block_id)
)""",
store._ddl['chain_summary'],
store._ddl['txout_detail'],
store._ddl['txin_detail'],
store._ddl['txout_approx'],
"""CREATE TABLE abe_lock (
lock_id NUMERIC(10) NOT NULL PRIMARY KEY,
pid VARCHAR(255) NULL
)""",
):
try:
store.ddl(stmt)
except Exception:
store.log.error("Failed: %s", stmt)
raise
for key in ['chain', 'datadir',
'tx', 'txout', 'pubkey', 'txin', 'block']:
store.create_sequence(key)
store.sql("INSERT INTO abe_lock (lock_id) VALUES (1)")
# Insert some well-known chain metadata.
for conf in CHAIN_CONFIG:
conf = conf.copy()
conf["name"] = conf.pop("chain")
if 'policy' in conf:
policy = conf.pop('policy')
else:
policy = conf['name']
chain = Chain.create(policy, **conf)
store.insert_chain(chain)
store.sql("""
INSERT INTO pubkey (pubkey_id, pubkey_hash) VALUES (?, ?)""",
(NULL_PUBKEY_ID, store.binin(NULL_PUBKEY_HASH)))
if store.args.use_firstbits:
store.config['use_firstbits'] = "true"
store.ddl(
"""CREATE TABLE abe_firstbits (
pubkey_id NUMERIC(26) NOT NULL,
block_id NUMERIC(14) NOT NULL,
address_version VARBINARY(10) NOT NULL,
firstbits VARCHAR(50) NOT NULL,
PRIMARY KEY (address_version, pubkey_id, block_id),
FOREIGN KEY (pubkey_id) REFERENCES pubkey (pubkey_id),
FOREIGN KEY (block_id) REFERENCES block (block_id)
)""")
store.ddl(
"""CREATE INDEX x_abe_firstbits
ON abe_firstbits (address_version, firstbits)""")
else:
store.config['use_firstbits'] = "false"
store.config['keep_scriptsig'] = \
"true" if store.args.keep_scriptsig else "false"
store.save_config()
store.commit()
def insert_chain(store, chain):
chain.id = store.new_id("chain")
store.sql("""
INSERT INTO chain (
chain_id, chain_magic, chain_name, chain_code3,
chain_address_version, chain_script_addr_vers, chain_policy, chain_decimals
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
(chain.id, store.binin(chain.magic), chain.name,
chain.code3, store.binin(chain.address_version), store.binin(chain.script_addr_vers),
chain.policy, chain.decimals))
def get_lock(store):
if store.version_below('Abe26'):
return None
conn = store.connect()
cur = conn.cursor()
cur.execute("UPDATE abe_lock SET pid = %d WHERE lock_id = 1"
% (os.getpid(),))
if cur.rowcount != 1:
raise Exception("unexpected rowcount")
cur.close()
# Check whether database supports concurrent updates. Where it
# doesn't (SQLite) we get exclusive access automatically.
try:
import random
letters = "".join([chr(random.randint(65, 90)) for x in xrange(10)])
store.sql("""
INSERT INTO configvar (configvar_name, configvar_value)
VALUES (?, ?)""",
("upgrade-lock-" + letters, 'x'))
except Exception:
store.release_lock(conn)
conn = None
store.rollback()
# XXX Should reread config.
return conn
def release_lock(store, conn):
if conn:
conn.rollback()
conn.close()
def version_below(store, vers):
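        # Compare schema tags numerically; e.g., version_below('Abe26')
        # is True when the stored schema_version is 'Abe25' (illustrative).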
try:
sv = float(store.config['schema_version'].replace(SCHEMA_TYPE, ''))
except ValueError:
return False
vers = float(vers.replace(SCHEMA_TYPE, ''))
return sv < vers
def configure(store):
config = store._sql.configure()
store.init_binfuncs()
for name in config.keys():
store.config['sql.' + name] = config[name]
def save_config(store):
store.config['schema_version'] = SCHEMA_VERSION
for name in store.config.keys():
store.save_configvar(name)
def save_configvar(store, name):
store.sql("UPDATE configvar SET configvar_value = ?"
" WHERE configvar_name = ?", (store.config[name], name))
if store.rowcount() == 0:
store.sql("INSERT INTO configvar (configvar_name, configvar_value)"
" VALUES (?, ?)", (name, store.config[name]))
def set_configvar(store, name, value):
store.config[name] = value
store.save_configvar(name)
def cache_block(store, block_id, height, prev_id, search_id):
assert isinstance(block_id, int), block_id
assert isinstance(height, int), height
assert prev_id is None or isinstance(prev_id, int)
assert search_id is None or isinstance(search_id, int)
block = {
'height': height,
'prev_id': prev_id,
'search_id': search_id}
store._blocks[block_id] = block
return block
def _load_block(store, block_id):
block = store._blocks.get(block_id)
if block is None:
row = store.selectrow("""
SELECT block_height, prev_block_id, search_block_id
FROM block
WHERE block_id = ?""", (block_id,))
if row is None:
return None
height, prev_id, search_id = row
block = store.cache_block(
block_id, int(height),
None if prev_id is None else int(prev_id),
None if search_id is None else int(search_id))
return block
def get_block_id_at_height(store, height, descendant_id):
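        # Walk back from descendant_id toward the requested height,
        # following search_block_id (a skip-list-style ancestor link)
        # whenever it does not overshoot the target, else prev_block_id.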
if height is None:
return None
while True:
block = store._load_block(descendant_id)
if block['height'] == height:
return descendant_id
descendant_id = block[
'search_id'
if util.get_search_height(block['height']) >= height else
'prev_id']
def is_descended_from(store, block_id, ancestor_id):
# ret = store._is_descended_from(block_id, ancestor_id)
# store.log.debug("%d is%s descended from %d", block_id, '' if ret else ' NOT', ancestor_id)
# return ret
# def _is_descended_from(store, block_id, ancestor_id):
block = store._load_block(block_id)
ancestor = store._load_block(ancestor_id)
height = ancestor['height']
return block['height'] >= height and \
store.get_block_id_at_height(height, block_id) == ancestor_id
def get_block_height(store, block_id):
return store._load_block(int(block_id))['height']
def find_prev(store, hash):
row = store.selectrow("""
SELECT block_id, block_height, block_chain_work,
block_total_satoshis, block_total_seconds,
block_satoshi_seconds, block_total_ss, block_nTime
FROM block
WHERE block_hash=?""", (store.hashin(hash),))
if row is None:
return (None, None, None, None, None, None, None, None)
(id, height, chain_work, satoshis, seconds, satoshi_seconds,
total_ss, nTime) = row
return (id, None if height is None else int(height),
store.binout_int(chain_work),
None if satoshis is None else int(satoshis),
None if seconds is None else int(seconds),
None if satoshi_seconds is None else int(satoshi_seconds),
None if total_ss is None else int(total_ss),
int(nTime))
def import_block(store, b, chain_ids=None, chain=None):
# Import new transactions.
if chain_ids is None:
chain_ids = frozenset() if chain is None else frozenset([chain.id])
b['value_in'] = 0
b['value_out'] = 0
b['value_destroyed'] = 0
tx_hash_array = []
# In the common case, all the block's txins _are_ linked, and we
# can avoid a query if we notice this.
all_txins_linked = True
for pos in xrange(len(b['transactions'])):
tx = b['transactions'][pos]
if 'hash' not in tx:
if chain is None:
store.log.debug("Falling back to SHA256 transaction hash")
tx['hash'] = util.double_sha256(tx['__data__'])
else:
tx['hash'] = chain.transaction_hash(tx['__data__'])
tx_hash_array.append(tx['hash'])
tx['tx_id'] = store.tx_find_id_and_value(tx, pos == 0)
if tx['tx_id']:
all_txins_linked = False
else:
if store.commit_bytes == 0:
tx['tx_id'] = store.import_and_commit_tx(tx, pos == 0, chain)
else:
tx['tx_id'] = store.import_tx(tx, pos == 0, chain)
if tx.get('unlinked_count', 1) > 0:
all_txins_linked = False
if tx['value_in'] is None:
b['value_in'] = None
elif b['value_in'] is not None:
b['value_in'] += tx['value_in']
b['value_out'] += tx['value_out']
b['value_destroyed'] += tx['value_destroyed']
# Get a new block ID.
block_id = int(store.new_id("block"))
b['block_id'] = block_id
if chain is not None:
# Verify Merkle root.
if b['hashMerkleRoot'] != chain.merkle_root(tx_hash_array):
raise MerkleRootMismatch(b['hash'], tx_hash_array)
# Look for the parent block.
hashPrev = b['hashPrev']
if chain is None:
# XXX No longer used.
is_genesis = hashPrev == util.GENESIS_HASH_PREV
else:
is_genesis = hashPrev == chain.genesis_hash_prev
(prev_block_id, prev_height, prev_work, prev_satoshis,
prev_seconds, prev_ss, prev_total_ss, prev_nTime) = (
(None, -1, 0, 0, 0, 0, 0, b['nTime'])
if is_genesis else
store.find_prev(hashPrev))
b['prev_block_id'] = prev_block_id
b['height'] = None if prev_height is None else prev_height + 1
b['chain_work'] = util.calculate_work(prev_work, b['nBits'])
if prev_seconds is None:
b['seconds'] = None
else:
b['seconds'] = prev_seconds + b['nTime'] - prev_nTime
if prev_satoshis is None or prev_satoshis < 0 or b['value_in'] is None:
# XXX Abuse this field to save work in adopt_orphans.
b['satoshis'] = -1 - b['value_destroyed']
else:
b['satoshis'] = prev_satoshis + b['value_out'] - b['value_in'] \
- b['value_destroyed']
if prev_satoshis is None or prev_satoshis < 0:
ss_created = None
b['total_ss'] = None
else:
ss_created = prev_satoshis * (b['nTime'] - prev_nTime)
b['total_ss'] = prev_total_ss + ss_created
if b['height'] is None or b['height'] < 2:
b['search_block_id'] = None
else:
b['search_block_id'] = store.get_block_id_at_height(
util.get_search_height(int(b['height'])),
None if prev_block_id is None else int(prev_block_id))
# Insert the block table row.
try:
store.sql(
"""INSERT INTO block (
block_id, block_hash, block_version, block_hashMerkleRoot,
block_nTime, block_nBits, block_nNonce, block_height,
prev_block_id, block_chain_work, block_value_in,
block_value_out, block_total_satoshis,
block_total_seconds, block_total_ss, block_num_tx,
search_block_id
) VALUES (
?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?
)""",
(block_id, store.hashin(b['hash']), store.intin(b['version']),
store.hashin(b['hashMerkleRoot']), store.intin(b['nTime']),
store.intin(b['nBits']), store.intin(b['nNonce']),
b['height'], prev_block_id,
store.binin_int(b['chain_work'], WORK_BITS),
store.intin(b['value_in']), store.intin(b['value_out']),
store.intin(b['satoshis']), store.intin(b['seconds']),
store.intin(b['total_ss']),
len(b['transactions']), b['search_block_id']))
except store.dbmodule.DatabaseError:
if store.commit_bytes == 0:
# Rollback won't undo any previous changes, since we
# always commit.
store.rollback()
# If the exception is due to another process having
# inserted the same block, it is okay.
row = store.selectrow("""
SELECT block_id, block_satoshi_seconds
FROM block
WHERE block_hash = ?""",
(store.hashin(b['hash']),))
if row:
store.log.info("Block already inserted; block_id %d unsued",
block_id)
b['block_id'] = int(row[0])
b['ss'] = None if row[1] is None else int(row[1])
store.offer_block_to_chains(b, chain_ids)
return
# This is not an expected error, or our caller may have to
# rewind a block file. Let them deal with it.
raise
# List the block's transactions in block_tx.
for tx_pos in xrange(len(b['transactions'])):
tx = b['transactions'][tx_pos]
store.sql("""
INSERT INTO block_tx
(block_id, tx_id, tx_pos)
VALUES (?, ?, ?)""",
(block_id, tx['tx_id'], tx_pos))
store.log.info("block_tx %d %d", block_id, tx['tx_id'])
if b['height'] is not None:
store._populate_block_txin(block_id)
if all_txins_linked or not store._has_unlinked_txins(block_id):
b['ss_destroyed'] = store._get_block_ss_destroyed(
block_id, b['nTime'],
map(lambda tx: tx['tx_id'], b['transactions']))
if ss_created is None or prev_ss is None:
b['ss'] = None
else:
b['ss'] = prev_ss + ss_created - b['ss_destroyed']
store.sql("""
UPDATE block
SET block_satoshi_seconds = ?,
block_ss_destroyed = ?
WHERE block_id = ?""",
(store.intin(b['ss']),
store.intin(b['ss_destroyed']),
block_id))
else:
b['ss_destroyed'] = None
b['ss'] = None
# Store the inverse hashPrev relationship or mark the block as
# an orphan.
if prev_block_id:
store.sql("""
INSERT INTO block_next (block_id, next_block_id)
VALUES (?, ?)""", (prev_block_id, block_id))
elif not is_genesis:
store.sql("INSERT INTO orphan_block (block_id, block_hashPrev)" +
" VALUES (?, ?)", (block_id, store.hashin(b['hashPrev'])))
for row in store.selectall("""
SELECT block_id FROM orphan_block WHERE block_hashPrev = ?""",
(store.hashin(b['hash']),)):
(orphan_id,) = row
store.sql("UPDATE block SET prev_block_id = ? WHERE block_id = ?",
(block_id, orphan_id))
store.sql("""
INSERT INTO block_next (block_id, next_block_id)
VALUES (?, ?)""", (block_id, orphan_id))
store.sql("DELETE FROM orphan_block WHERE block_id = ?",
(orphan_id,))
# offer_block_to_chains calls adopt_orphans, which propagates
# block_height and other cumulative data to the blocks
# attached above.
store.offer_block_to_chains(b, chain_ids)
return block_id
def _populate_block_txin(store, block_id):
# Create rows in block_txin. In case of duplicate transactions,
# choose the one with the lowest block ID. XXX For consistency,
# it should be the lowest height instead of block ID.
for row in store.selectall("""
SELECT txin.txin_id, MIN(obt.block_id)
FROM block_tx bt
JOIN txin ON (txin.tx_id = bt.tx_id)
JOIN txout ON (txin.txout_id = txout.txout_id)
JOIN block_tx obt ON (txout.tx_id = obt.tx_id)
JOIN block ob ON (obt.block_id = ob.block_id)
WHERE bt.block_id = ?
AND ob.block_chain_work IS NOT NULL
GROUP BY txin.txin_id""", (block_id,)):
(txin_id, oblock_id) = row
if store.is_descended_from(block_id, int(oblock_id)):
store.sql("""
INSERT INTO block_txin (block_id, txin_id, out_block_id)
VALUES (?, ?, ?)""",
(block_id, txin_id, oblock_id))
def _has_unlinked_txins(store, block_id):
(unlinked_count,) = store.selectrow("""
SELECT COUNT(1)
FROM block_tx bt
JOIN txin ON (bt.tx_id = txin.tx_id)
JOIN unlinked_txin u ON (txin.txin_id = u.txin_id)
WHERE bt.block_id = ?""", (block_id,))
return unlinked_count > 0
def _get_block_ss_destroyed(store, block_id, nTime, tx_ids):
block_ss_destroyed = 0
for tx_id in tx_ids:
destroyed = int(store.selectrow("""
SELECT COALESCE(SUM(txout_approx.txout_approx_value *
(? - b.block_nTime)), 0)
FROM block_txin bti
JOIN txin ON (bti.txin_id = txin.txin_id)
JOIN txout_approx ON (txin.txout_id = txout_approx.txout_id)
JOIN block_tx obt ON (txout_approx.tx_id = obt.tx_id)
JOIN block b ON (obt.block_id = b.block_id)
WHERE bti.block_id = ? AND txin.tx_id = ?""",
(nTime, block_id, tx_id))[0])
block_ss_destroyed += destroyed
return block_ss_destroyed
# Propagate cumulative values to descendant blocks. Return info
# about the longest chains containing b. The returned dictionary
# is keyed by the chain_id of a chain whose validation policy b
# satisfies. Each value is a pair (block, work) where block is
# the best block descended from b in the given chain, and work is
# the sum of orphan_work and the work between b and block. Only
# chains in chain_mask are considered. Even if no known chain
# contains b, this routine populates any descendant blocks'
# cumulative statistics that are known for b and returns an empty
# dictionary.
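    # Illustrative example: with chain_mask == frozenset([1]) and b
    # followed on chain 1 by blocks b1 and b2, the returned dictionary
    # maps 1 to (b2, orphan_work + work(b1) + work(b2)).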
def adopt_orphans(store, b, orphan_work, chain_ids, chain_mask):
# XXX As originally written, this function occasionally hit
# Python's recursion limit. I am rewriting it iteratively
# with minimal changes, hence the odd style. Guido is
# particularly unhelpful here, rejecting even labeled loops.
ret = [None]
def receive(x):
ret[0] = x
def doit():
store._adopt_orphans_1(stack)
stack = [receive, chain_mask, chain_ids, orphan_work, b, doit]
while stack:
stack.pop()()
return ret[0]
def _adopt_orphans_1(store, stack):
def doit():
store._adopt_orphans_1(stack)
def continuation(x):
store._adopt_orphans_2(stack, x)
def didit():
ret = stack.pop()
stack.pop()(ret)
b = stack.pop()
orphan_work = stack.pop()
chain_ids = stack.pop()
chain_mask = stack.pop()
ret = {}
stack += [ ret, didit ]
block_id = b['block_id']
height = None if b['height'] is None else int(b['height'] + 1)
# If adding block b, b will not yet be in chain_candidate, so
# we rely on the chain_ids argument. If called recursively,
# look up chain_ids in chain_candidate.
if not chain_ids:
if chain_mask:
chain_mask = chain_mask.intersection(
store.find_chains_containing_block(block_id))
chain_ids = chain_mask
for chain_id in chain_ids:
ret[chain_id] = (b, orphan_work)
for row in store.selectall("""
SELECT bn.next_block_id, b.block_nBits,
b.block_value_out, b.block_value_in, b.block_nTime,
b.block_total_satoshis
FROM block_next bn
JOIN block b ON (bn.next_block_id = b.block_id)
WHERE bn.block_id = ?""", (block_id,)):
next_id, nBits, value_out, value_in, nTime, satoshis = row
nBits = int(nBits)
nTime = int(nTime)
satoshis = None if satoshis is None else int(satoshis)
new_work = util.calculate_work(orphan_work, nBits)
if b['chain_work'] is None:
chain_work = None
else:
chain_work = b['chain_work'] + new_work - orphan_work
if value_in is None:
value, count1, count2 = store.selectrow("""
SELECT SUM(txout.txout_value),
COUNT(1),
COUNT(txout.txout_value)
FROM block_tx bt
JOIN txin ON (bt.tx_id = txin.tx_id)
LEFT JOIN txout ON (txout.txout_id = txin.txout_id)
WHERE bt.block_id = ?""", (next_id,))
if count1 == count2 + 1:
value_in = int(value)
else:
store.log.warning(
"not updating block %d value_in: %s != %s + 1",
next_id, repr(count1), repr(count2))
else:
value_in = int(value_in)
generated = None if value_in is None else int(value_out - value_in)
if b['seconds'] is None:
seconds = None
total_ss = None
else:
new_seconds = nTime - b['nTime']
seconds = b['seconds'] + new_seconds
if b['total_ss'] is None or b['satoshis'] is None:
total_ss = None
else:
total_ss = b['total_ss'] + new_seconds * b['satoshis']
if satoshis < 0 and b['satoshis'] is not None and \
b['satoshis'] >= 0 and generated is not None:
satoshis += 1 + b['satoshis'] + generated
if height is None or height < 2:
search_block_id = None
else:
search_block_id = store.get_block_id_at_height(
util.get_search_height(height), int(block_id))
store.sql("""
UPDATE block
SET block_height = ?,
block_chain_work = ?,
block_value_in = ?,
block_total_seconds = ?,
block_total_satoshis = ?,
block_total_ss = ?,
search_block_id = ?
WHERE block_id = ?""",
(height, store.binin_int(chain_work, WORK_BITS),
store.intin(value_in),
store.intin(seconds), store.intin(satoshis),
store.intin(total_ss), search_block_id,
next_id))
ss = None
if height is not None:
store.sql("""
UPDATE chain_candidate SET block_height = ?
WHERE block_id = ?""",
(height, next_id))
store._populate_block_txin(int(next_id))
if b['ss'] is None or store._has_unlinked_txins(next_id):
pass
else:
tx_ids = map(
lambda row: row[0],
store.selectall("""
SELECT tx_id
FROM block_tx
WHERE block_id = ?""", (next_id,)))
destroyed = store._get_block_ss_destroyed(
next_id, nTime, tx_ids)
ss = b['ss'] + b['satoshis'] * (nTime - b['nTime']) \
- destroyed
store.sql("""
UPDATE block
SET block_satoshi_seconds = ?,
block_ss_destroyed = ?
WHERE block_id = ?""",
(store.intin(ss),
store.intin(destroyed),
next_id))
if store.use_firstbits:
for (addr_vers,) in store.selectall("""
SELECT c.chain_address_version
FROM chain c
JOIN chain_candidate cc ON (c.chain_id = cc.chain_id)
WHERE cc.block_id = ?""", (next_id,)):
store.do_vers_firstbits(addr_vers, int(next_id))
nb = {
"block_id": next_id,
"height": height,
"chain_work": chain_work,
"nTime": nTime,
"seconds": seconds,
"satoshis": satoshis,
"total_ss": total_ss,
"ss": ss}
stack += [ret, continuation,
chain_mask, None, new_work, nb, doit]
def _adopt_orphans_2(store, stack, next_ret):
ret = stack.pop()
for chain_id in ret.keys():
pair = next_ret[chain_id]
if pair and pair[1] > ret[chain_id][1]:
ret[chain_id] = pair
def _export_scriptPubKey(store, txout, chain, scriptPubKey):
"""In txout, set script_type, address_version, binaddr, and for multisig, required_signatures."""
if scriptPubKey is None:
txout['script_type'] = None
txout['binaddr'] = None
return
script_type, data = chain.parse_txout_script(scriptPubKey)
txout['script_type'] = script_type
txout['address_version'] = chain.address_version
if script_type == Chain.SCRIPT_TYPE_PUBKEY:
txout['binaddr'] = chain.pubkey_hash(data)
elif script_type == Chain.SCRIPT_TYPE_ADDRESS:
txout['binaddr'] = data
elif script_type == Chain.SCRIPT_TYPE_P2SH:
txout['address_version'] = chain.script_addr_vers
txout['binaddr'] = data
elif script_type == Chain.SCRIPT_TYPE_MULTISIG:
txout['required_signatures'] = data['m']
txout['binaddr'] = chain.pubkey_hash(scriptPubKey)
txout['subbinaddr'] = [
chain.pubkey_hash(pubkey)
for pubkey in data['pubkeys']
]
elif script_type == Chain.SCRIPT_TYPE_BURN:
txout['binaddr'] = NULL_PUBKEY_HASH
else:
txout['binaddr'] = None
def export_block(store, chain=None, block_hash=None, block_number=None):
"""
Return a dict with the following:
* chain_candidates[]
* chain
* in_longest
* chain_satoshis
* chain_satoshi_seconds
* chain_work
* fees
* generated
* hash
* hashMerkleRoot
* hashPrev
* height
* nBits
* next_block_hashes
* nNonce
* nTime
* satoshis_destroyed
* satoshi_seconds
* transactions[]
* fees
* hash
* in[]
* address_version
* binaddr
* value
* out[]
* address_version
* binaddr
* value
* size
* value_out
* version
Additionally, for multisig inputs and outputs:
* subbinaddr[]
* required_signatures
Additionally, for proof-of-stake chains:
* is_proof_of_stake
* proof_of_stake_generated
"""
if block_number is None and block_hash is None:
raise ValueError("export_block requires either block_hash or block_number")
where = []
bind = []
if chain is not None:
where.append('chain_id = ?')
bind.append(chain.id)
if block_hash is not None:
where.append('block_hash = ?')
bind.append(store.hashin_hex(block_hash))
if block_number is not None:
where.append('block_height = ? AND in_longest = 1')
bind.append(block_number)
sql = """
SELECT
chain_id,
in_longest,
block_id,
block_hash,
block_version,
block_hashMerkleRoot,
block_nTime,
block_nBits,
block_nNonce,
block_height,
prev_block_hash,
block_chain_work,
block_value_in,
block_value_out,
block_total_satoshis,
block_total_seconds,
block_satoshi_seconds,
block_total_ss,
block_ss_destroyed,
block_num_tx
FROM chain_summary
WHERE """ + ' AND '.join(where) + """
ORDER BY
in_longest DESC,
chain_id DESC"""
rows = store.selectall(sql, bind)
if len(rows) == 0:
return None
row = rows[0][2:]
def parse_cc(row):
chain_id, in_longest = row[:2]
return { "chain": store.get_chain_by_id(chain_id), "in_longest": in_longest }
# Absent the chain argument, default to highest chain_id, preferring to avoid side chains.
cc = map(parse_cc, rows)
# "chain" may be None, but "found_chain" will not.
found_chain = chain
if found_chain is None:
if len(cc) > 0:
found_chain = cc[0]['chain']
else:
# Should not normally get here.
found_chain = store.get_default_chain()
(block_id, block_hash, block_version, hashMerkleRoot,
nTime, nBits, nNonce, height,
prev_block_hash, block_chain_work, value_in, value_out,
satoshis, seconds, ss, total_ss, destroyed, num_tx) = (
row[0], store.hashout_hex(row[1]), row[2],
store.hashout_hex(row[3]), row[4], int(row[5]), row[6],
row[7], store.hashout_hex(row[8]),
store.binout_int(row[9]), int(row[10]), int(row[11]),
None if row[12] is None else int(row[12]),
None if row[13] is None else int(row[13]),
None if row[14] is None else int(row[14]),
None if row[15] is None else int(row[15]),
None if row[16] is None else int(row[16]),
int(row[17]),
)
next_hashes = [
store.hashout_hex(hash) for hash, il in
store.selectall("""
SELECT DISTINCT n.block_hash, cc.in_longest
FROM block_next bn
JOIN block n ON (bn.next_block_id = n.block_id)
JOIN chain_candidate cc ON (n.block_id = cc.block_id)
WHERE bn.block_id = ?
ORDER BY cc.in_longest DESC""",
(block_id,)) ]
tx_ids = []
txs = {}
block_out = 0
block_in = 0
for row in store.selectall("""
SELECT tx_id, tx_hash, tx_size, txout_value, txout_scriptPubKey
FROM txout_detail
WHERE block_id = ?
ORDER BY tx_pos, txout_pos
""", (block_id,)):
tx_id, tx_hash, tx_size, txout_value, scriptPubKey = (
row[0], row[1], row[2], int(row[3]), store.binout(row[4]))
tx = txs.get(tx_id)
if tx is None:
tx_ids.append(tx_id)
txs[tx_id] = {
"hash": store.hashout_hex(tx_hash),
"total_out": 0,
"total_in": 0,
"out": [],
"in": [],
"size": int(tx_size),
}
tx = txs[tx_id]
tx['total_out'] += txout_value
block_out += txout_value
txout = { 'value': txout_value }
store._export_scriptPubKey(txout, found_chain, scriptPubKey)
tx['out'].append(txout)
for row in store.selectall("""
SELECT tx_id, txin_value, txin_scriptPubKey
FROM txin_detail
WHERE block_id = ?
ORDER BY tx_pos, txin_pos
""", (block_id,)):
tx_id, txin_value, scriptPubKey = (
row[0], 0 if row[1] is None else int(row[1]),
store.binout(row[2]))
tx = txs.get(tx_id)
if tx is None:
# Strange, inputs but no outputs?
tx_ids.append(tx_id)
tx_hash, tx_size = store.selectrow("""
SELECT tx_hash, tx_size FROM tx WHERE tx_id = ?""",
(tx_id,))
txs[tx_id] = {
"hash": store.hashout_hex(tx_hash),
"total_out": 0,
"total_in": 0,
"out": [],
"in": [],
"size": int(tx_size),
}
tx = txs[tx_id]
tx['total_in'] += txin_value
block_in += txin_value
txin = { 'value': txin_value }
store._export_scriptPubKey(txin, found_chain, scriptPubKey)
tx['in'].append(txin)
generated = block_out - block_in
coinbase_tx = txs[tx_ids[0]]
coinbase_tx['fees'] = 0
block_fees = coinbase_tx['total_out'] - generated
b = {
'chain_candidates': cc,
'chain_satoshis': satoshis,
'chain_satoshi_seconds': total_ss,
'chain_work': block_chain_work,
'fees': block_fees,
'generated': generated,
'hash': block_hash,
'hashMerkleRoot': hashMerkleRoot,
'hashPrev': prev_block_hash,
'height': height,
'nBits': nBits,
'next_block_hashes': next_hashes,
'nNonce': nNonce,
'nTime': nTime,
'satoshis_destroyed': destroyed,
'satoshi_seconds': ss,
'transactions': [txs[tx_id] for tx_id in tx_ids],
'value_out': block_out,
'version': block_version,
}
is_stake_chain = chain is not None and chain.has_feature('nvc_proof_of_stake')
if is_stake_chain:
# Proof-of-stake display based loosely on CryptoManiac/novacoin and
# http://nvc.cryptocoinexplorer.com.
b['is_proof_of_stake'] = len(tx_ids) > 1 and coinbase_tx['total_out'] == 0
for tx_id in tx_ids[1:]:
tx = txs[tx_id]
tx['fees'] = tx['total_in'] - tx['total_out']
if is_stake_chain and b['is_proof_of_stake']:
b['proof_of_stake_generated'] = -txs[tx_ids[1]]['fees']
txs[tx_ids[1]]['fees'] = 0
b['fees'] += b['proof_of_stake_generated']
return b
def tx_find_id_and_value(store, tx, is_coinbase):
row = store.selectrow("""
SELECT tx.tx_id, SUM(txout.txout_value), SUM(
CASE WHEN txout.pubkey_id > 0 THEN txout.txout_value
ELSE 0 END)
FROM tx
LEFT JOIN txout ON (tx.tx_id = txout.tx_id)
WHERE tx_hash = ?
GROUP BY tx.tx_id""",
(store.hashin(tx['hash']),))
if row:
tx_id, value_out, undestroyed = row
value_out = 0 if value_out is None else int(value_out)
undestroyed = 0 if undestroyed is None else int(undestroyed)
count_in, value_in = store.selectrow("""
SELECT COUNT(1), SUM(prevout.txout_value)
FROM txin
JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
WHERE txin.tx_id = ?""", (tx_id,))
if (count_in or 0) < len(tx['txIn']):
value_in = 0 if is_coinbase else None
tx['value_in'] = None if value_in is None else int(value_in)
tx['value_out'] = value_out
tx['value_destroyed'] = value_out - undestroyed
return tx_id
return None
def import_tx(store, tx, is_coinbase, chain):
tx_id = store.new_id("tx")
dbhash = store.hashin(tx['hash'])
if 'size' not in tx:
tx['size'] = len(tx['__data__'])
store.sql("""
INSERT INTO tx (tx_id, tx_hash, tx_version, tx_lockTime, tx_size)
VALUES (?, ?, ?, ?, ?)""",
(tx_id, dbhash, store.intin(tx['version']),
store.intin(tx['lockTime']), tx['size']))
# Import transaction outputs.
tx['value_out'] = 0
tx['value_destroyed'] = 0
for pos in xrange(len(tx['txOut'])):
txout = tx['txOut'][pos]
tx['value_out'] += txout['value']
txout_id = store.new_id("txout")
pubkey_id = store.script_to_pubkey_id(chain, txout['scriptPubKey'])
if pubkey_id is not None and pubkey_id <= 0:
tx['value_destroyed'] += txout['value']
store.sql("""
INSERT INTO txout (
txout_id, tx_id, txout_pos, txout_value,
txout_scriptPubKey, pubkey_id
) VALUES (?, ?, ?, ?, ?, ?)""",
(txout_id, tx_id, pos, store.intin(txout['value']),
store.binin(txout['scriptPubKey']), pubkey_id))
for row in store.selectall("""
SELECT txin_id
FROM unlinked_txin
WHERE txout_tx_hash = ?
AND txout_pos = ?""", (dbhash, pos)):
(txin_id,) = row
store.sql("UPDATE txin SET txout_id = ? WHERE txin_id = ?",
(txout_id, txin_id))
store.sql("DELETE FROM unlinked_txin WHERE txin_id = ?",
(txin_id,))
# Import transaction inputs.
tx['value_in'] = 0
tx['unlinked_count'] = 0
for pos in xrange(len(tx['txIn'])):
txin = tx['txIn'][pos]
txin_id = store.new_id("txin")
if is_coinbase:
txout_id = None
else:
txout_id, value = store.lookup_txout(
txin['prevout_hash'], txin['prevout_n'])
if value is None:
tx['value_in'] = None
elif tx['value_in'] is not None:
tx['value_in'] += value
store.sql("""
INSERT INTO txin (
txin_id, tx_id, txin_pos, txout_id""" + (""",
txin_scriptSig, txin_sequence""" if store.keep_scriptsig
else "") + """
) VALUES (?, ?, ?, ?""" + (", ?, ?" if store.keep_scriptsig
else "") + """)""",
(txin_id, tx_id, pos, txout_id,
store.binin(txin['scriptSig']),
store.intin(txin['sequence'])) if store.keep_scriptsig
else (txin_id, tx_id, pos, txout_id))
if not is_coinbase and txout_id is None:
tx['unlinked_count'] += 1
store.sql("""
INSERT INTO unlinked_txin (
txin_id, txout_tx_hash, txout_pos
) VALUES (?, ?, ?)""",
(txin_id, store.hashin(txin['prevout_hash']),
store.intin(txin['prevout_n'])))
# XXX Could populate PUBKEY.PUBKEY with txin scripts...
# or leave that to an offline process. Nothing in this program
# requires them.
return tx_id
def import_and_commit_tx(store, tx, is_coinbase, chain):
try:
tx_id = store.import_tx(tx, is_coinbase, chain)
store.commit()
except store.dbmodule.DatabaseError:
store.rollback()
# Violation of tx_hash uniqueness?
tx_id = store.tx_find_id_and_value(tx, is_coinbase)
if not tx_id:
raise
return tx_id
def maybe_import_binary_tx(store, chain_name, binary_tx):
if chain_name is None:
chain = store.get_default_chain()
else:
chain = store.get_chain_by_name(chain_name)
tx_hash = chain.transaction_hash(binary_tx)
(count,) = store.selectrow(
"SELECT COUNT(1) FROM tx WHERE tx_hash = ?",
(store.hashin(tx_hash),))
if count == 0:
tx = chain.parse_transaction(binary_tx)
tx['hash'] = tx_hash
store.import_tx(tx, chain.is_coinbase_tx(tx), chain)
store.imported_bytes(tx['size'])
def export_tx(store, tx_id=None, tx_hash=None, decimals=8, format="api", chain=None):
"""Return a dict as seen by /rawtx or None if not found."""
# TODO: merge _export_tx_detail with export_tx.
if format == 'browser':
return store._export_tx_detail(tx_hash, chain=chain)
tx = {}
is_bin = format == "binary"
if tx_id is not None:
row = store.selectrow("""
SELECT tx_hash, tx_version, tx_lockTime, tx_size
FROM tx
WHERE tx_id = ?
""", (tx_id,))
if row is None:
return None
tx['hash'] = store.hashout_hex(row[0])
elif tx_hash is not None:
row = store.selectrow("""
SELECT tx_id, tx_version, tx_lockTime, tx_size
FROM tx
WHERE tx_hash = ?
""", (store.hashin_hex(tx_hash),))
if row is None:
return None
tx['hash'] = tx_hash.decode('hex')[::-1] if is_bin else tx_hash
tx_id = row[0]
else:
raise ValueError("export_tx requires either tx_id or tx_hash.")
tx['version' if is_bin else 'ver'] = int(row[1])
tx['lockTime' if is_bin else 'lock_time'] = int(row[2])
tx['size'] = int(row[3])
txins = []
tx['txIn' if is_bin else 'in'] = txins
for row in store.selectall("""
SELECT
COALESCE(tx.tx_hash, uti.txout_tx_hash),
COALESCE(txout.txout_pos, uti.txout_pos)""" + (""",
txin_scriptSig,
txin_sequence""" if store.keep_scriptsig else "") + """
FROM txin
LEFT JOIN txout ON (txin.txout_id = txout.txout_id)
LEFT JOIN tx ON (txout.tx_id = tx.tx_id)
LEFT JOIN unlinked_txin uti ON (txin.txin_id = uti.txin_id)
WHERE txin.tx_id = ?
ORDER BY txin.txin_pos""", (tx_id,)):
prevout_hash = row[0]
prevout_n = None if row[1] is None else int(row[1])
if is_bin:
txin = {
'prevout_hash': store.hashout(prevout_hash),
'prevout_n': prevout_n}
else:
if prevout_hash is None:
prev_out = {
'hash': "0" * 64, # XXX should store this?
'n': 0xffffffff} # XXX should store this?
else:
prev_out = {
'hash': store.hashout_hex(prevout_hash),
'n': prevout_n}
txin = {'prev_out': prev_out}
if store.keep_scriptsig:
scriptSig = row[2]
sequence = row[3]
if is_bin:
txin['scriptSig'] = store.binout(scriptSig)
else:
txin['raw_scriptSig'] = store.binout_hex(scriptSig)
txin['sequence'] = None if sequence is None else int(sequence)
txins.append(txin)
txouts = []
tx['txOut' if is_bin else 'out'] = txouts
for satoshis, scriptPubKey in store.selectall("""
SELECT txout_value, txout_scriptPubKey
FROM txout
WHERE tx_id = ?
ORDER BY txout_pos""", (tx_id,)):
if is_bin:
txout = {
'value': int(satoshis),
'scriptPubKey': store.binout(scriptPubKey)}
else:
coin = 10 ** decimals
satoshis = int(satoshis)
integer = satoshis / coin
frac = satoshis % coin
txout = {
'value': ("%%d.%%0%dd" % (decimals,)) % (integer, frac),
'raw_scriptPubKey': store.binout_hex(scriptPubKey)}
txouts.append(txout)
if not is_bin:
tx['vin_sz'] = len(txins)
tx['vout_sz'] = len(txouts)
return tx
def _export_tx_detail(store, tx_hash, chain):
try:
dbhash = store.hashin_hex(tx_hash)
except TypeError:
raise MalformedHash()
row = store.selectrow("""
SELECT tx_id, tx_version, tx_lockTime, tx_size
FROM tx
WHERE tx_hash = ?
""", (dbhash,))
if row is None:
return None
tx_id = int(row[0])
tx = {
'hash': tx_hash,
'version': int(row[1]),
'lockTime': int(row[2]),
'size': int(row[3]),
}
def parse_tx_cc(row):
return {
'chain': store.get_chain_by_id(row[0]),
'in_longest': int(row[1]),
'block_nTime': int(row[2]),
'block_height': None if row[3] is None else int(row[3]),
'block_hash': store.hashout_hex(row[4]),
'tx_pos': int(row[5])
}
tx['chain_candidates'] = map(parse_tx_cc, store.selectall("""
SELECT cc.chain_id, cc.in_longest,
b.block_nTime, b.block_height, b.block_hash,
block_tx.tx_pos
FROM chain_candidate cc
JOIN block b ON (b.block_id = cc.block_id)
JOIN block_tx ON (block_tx.block_id = b.block_id)
WHERE block_tx.tx_id = ?
ORDER BY cc.chain_id, cc.in_longest DESC, b.block_hash
""", (tx_id,)))
if chain is None:
if len(tx['chain_candidates']) > 0:
chain = tx['chain_candidates'][0]['chain']
else:
chain = store.get_default_chain()
def parse_row(row):
pos, script, value, o_hash, o_pos = row[:5]
script = store.binout(script)
            scriptPubKey = store.binout(row[5]) if len(row) > 5 else script
ret = {
"pos": int(pos),
"binscript": script,
"value": None if value is None else int(value),
"o_hash": store.hashout_hex(o_hash),
"o_pos": None if o_pos is None else int(o_pos),
}
store._export_scriptPubKey(ret, chain, scriptPubKey)
return ret
# XXX Unneeded outer join.
tx['in'] = map(parse_row, store.selectall("""
SELECT
txin.txin_pos""" + (""",
txin.txin_scriptSig""" if store.keep_scriptsig else """,
NULL""") + """,
txout.txout_value,
COALESCE(prevtx.tx_hash, u.txout_tx_hash),
COALESCE(txout.txout_pos, u.txout_pos),
txout.txout_scriptPubKey
FROM txin
LEFT JOIN txout ON (txout.txout_id = txin.txout_id)
LEFT JOIN tx prevtx ON (txout.tx_id = prevtx.tx_id)
LEFT JOIN unlinked_txin u ON (u.txin_id = txin.txin_id)
WHERE txin.tx_id = ?
ORDER BY txin.txin_pos
""", (tx_id,)))
# XXX Only one outer join needed.
tx['out'] = map(parse_row, store.selectall("""
SELECT
txout.txout_pos,
txout.txout_scriptPubKey,
txout.txout_value,
nexttx.tx_hash,
txin.txin_pos
FROM txout
LEFT JOIN txin ON (txin.txout_id = txout.txout_id)
LEFT JOIN tx nexttx ON (txin.tx_id = nexttx.tx_id)
WHERE txout.tx_id = ?
ORDER BY txout.txout_pos
""", (tx_id,)))
def sum_values(rows):
ret = 0
for row in rows:
if row['value'] is None:
return None
ret += row['value']
return ret
tx['value_in'] = sum_values(tx['in'])
tx['value_out'] = sum_values(tx['out'])
return tx
def export_address_history(store, address, chain=None, max_rows=-1, types=frozenset(['direct', 'escrow'])):
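        # Gather both sides of the address's history: outputs paying the
        # address and inputs spending those outputs, where 'escrow' rows
        # reach multisig scripts that include the address via multisig_pubkey.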
version, binaddr = util.decode_check_address(address)
if binaddr is None:
raise MalformedAddress("Invalid address")
balance = {}
received = {}
sent = {}
counts = [0, 0]
chains = []
def adj_balance(txpoint):
chain = txpoint['chain']
if chain.id not in balance:
chains.append(chain)
balance[chain.id] = 0
received[chain.id] = 0
sent[chain.id] = 0
if txpoint['type'] == 'direct':
value = txpoint['value']
balance[chain.id] += value
if txpoint['is_out']:
sent[chain.id] -= value
else:
received[chain.id] += value
counts[txpoint['is_out']] += 1
dbhash = store.binin(binaddr)
txpoints = []
def parse_row(is_out, row_type, nTime, chain_id, height, blk_hash, tx_hash, pos, value, script=None):
chain = store.get_chain_by_id(chain_id)
txpoint = {
'type': row_type,
'is_out': int(is_out),
'nTime': int(nTime),
'chain': chain,
'height': int(height),
'blk_hash': store.hashout_hex(blk_hash),
'tx_hash': store.hashout_hex(tx_hash),
'pos': int(pos),
'value': int(value),
}
if script is not None:
store._export_scriptPubKey(txpoint, chain, store.binout(script))
return txpoint
def parse_direct_in(row): return parse_row(True, 'direct', *row)
def parse_direct_out(row): return parse_row(False, 'direct', *row)
def parse_escrow_in(row): return parse_row(True, 'escrow', *row)
def parse_escrow_out(row): return parse_row(False, 'escrow', *row)
def get_received(escrow):
return store.selectall("""
SELECT
b.block_nTime,
cc.chain_id,
b.block_height,
b.block_hash,
tx.tx_hash,
txin.txin_pos,
-prevout.txout_value""" + (""",
prevout.txout_scriptPubKey""" if escrow else "") + """
FROM chain_candidate cc
JOIN block b ON (b.block_id = cc.block_id)
JOIN block_tx ON (block_tx.block_id = b.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txin ON (txin.tx_id = tx.tx_id)
JOIN txout prevout ON (txin.txout_id = prevout.txout_id)""" + ("""
JOIN multisig_pubkey mp ON (mp.multisig_id = prevout.pubkey_id)""" if escrow else "") + """
JOIN pubkey ON (pubkey.pubkey_id = """ + ("mp" if escrow else "prevout") + """.pubkey_id)
WHERE pubkey.pubkey_hash = ?
AND cc.in_longest = 1""" + ("" if max_rows < 0 else """
LIMIT ?"""),
(dbhash,)
if max_rows < 0 else
(dbhash, max_rows + 1))
def get_sent(escrow):
return store.selectall("""
SELECT
b.block_nTime,
cc.chain_id,
b.block_height,
b.block_hash,
tx.tx_hash,
txout.txout_pos,
txout.txout_value""" + (""",
txout.txout_scriptPubKey""" if escrow else "") + """
FROM chain_candidate cc
JOIN block b ON (b.block_id = cc.block_id)
JOIN block_tx ON (block_tx.block_id = b.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txout ON (txout.tx_id = tx.tx_id)""" + ("""
JOIN multisig_pubkey mp ON (mp.multisig_id = txout.pubkey_id)""" if escrow else "") + """
JOIN pubkey ON (pubkey.pubkey_id = """ + ("mp" if escrow else "txout") + """.pubkey_id)
WHERE pubkey.pubkey_hash = ?
AND cc.in_longest = 1""" + ("" if max_rows < 0 else """
LIMIT ?"""),
(dbhash, max_rows + 1)
if max_rows >= 0 else
(dbhash,))
if 'direct' in types:
in_rows = get_received(False)
if len(in_rows) > max_rows >= 0:
return None # XXX Could still show address basic data.
txpoints += map(parse_direct_in, in_rows)
out_rows = get_sent(False)
if len(out_rows) > max_rows >= 0:
return None
txpoints += map(parse_direct_out, out_rows)
if 'escrow' in types:
in_rows = get_received(True)
if len(in_rows) > max_rows >= 0:
return None
txpoints += map(parse_escrow_in, in_rows)
out_rows = get_sent(True)
if len(out_rows) > max_rows >= 0:
return None
txpoints += map(parse_escrow_out, out_rows)
def cmp_txpoint(p1, p2):
return cmp(p1['nTime'], p2['nTime']) \
or cmp(p1['is_out'], p2['is_out']) \
or cmp(p1['height'], p2['height']) \
or cmp(p1['chain'].name, p2['chain'].name)
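# Order history by time, then direction, height, and chain name for a
# stable display order.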
txpoints.sort(cmp_txpoint)
for txpoint in txpoints:
adj_balance(txpoint)
hist = {
'binaddr': binaddr,
'version': version,
'chains': chains,
'txpoints': txpoints,
'balance': balance,
'sent': sent,
'received': received,
'counts': counts
}
# Show P2SH address components, if known.
# XXX With some more work, we could find required_signatures.
for (subbinaddr,) in store.selectall("""
SELECT sub.pubkey_hash
FROM multisig_pubkey mp
JOIN pubkey top ON (mp.multisig_id = top.pubkey_id)
JOIN pubkey sub ON (mp.pubkey_id = sub.pubkey_id)
WHERE top.pubkey_hash = ?""", (dbhash,)):
if 'subbinaddr' not in hist:
hist['subbinaddr'] = []
hist['subbinaddr'].append(store.binout(subbinaddr))
return hist
# Called to indicate that the given block has the correct magic
# number and policy for the given chains. Updates CHAIN_CANDIDATE
# and CHAIN.CHAIN_LAST_BLOCK_ID as appropriate.
def offer_block_to_chains(store, b, chain_ids):
b['top'] = store.adopt_orphans(b, 0, chain_ids, chain_ids)
for chain_id in chain_ids:
store._offer_block_to_chain(b, chain_id)
def _offer_block_to_chain(store, b, chain_id):
if b['chain_work'] is None:
in_longest = 0
else:
# Do we produce a chain longer than the current chain?
# Query whether the new block (or its tallest descendant)
# beats the current chain_last_block_id. Also check
# whether the current best is our top, which indicates
# this block is in longest; this can happen in database
# repair scenarios.
top = b['top'][chain_id][0]
row = store.selectrow("""
SELECT b.block_id, b.block_height, b.block_chain_work
FROM block b, chain c
WHERE c.chain_id = ?
AND b.block_id = c.chain_last_block_id""", (chain_id,))
if row:
loser_id, loser_height, loser_work = row
if loser_id != top['block_id'] and \
store.binout_int(loser_work) >= top['chain_work']:
row = None
if row:
# New longest chain.
in_longest = 1
to_connect = []
to_disconnect = []
winner_id = top['block_id']
winner_height = top['height']
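# Walk both tips back to equal heights, then back in lockstep to the
# fork point; blocks unique to the losing branch are disconnected and
# blocks unique to the winning branch connected, lowest height first.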
while loser_height > winner_height:
to_disconnect.insert(0, loser_id)
loser_id = store.get_prev_block_id(loser_id)
loser_height -= 1
while winner_height > loser_height:
to_connect.insert(0, winner_id)
winner_id = store.get_prev_block_id(winner_id)
winner_height -= 1
loser_height = None
while loser_id != winner_id:
to_disconnect.insert(0, loser_id)
loser_id = store.get_prev_block_id(loser_id)
to_connect.insert(0, winner_id)
winner_id = store.get_prev_block_id(winner_id)
winner_height -= 1
for block_id in to_disconnect:
store.disconnect_block(block_id, chain_id)
for block_id in to_connect:
store.connect_block(block_id, chain_id)
elif b['hashPrev'] == store.get_chain_by_id(chain_id).genesis_hash_prev:
in_longest = 1 # Assume only one genesis block per chain. XXX
else:
in_longest = 0
store.sql("""
INSERT INTO chain_candidate (
chain_id, block_id, in_longest, block_height
) VALUES (?, ?, ?, ?)""",
(chain_id, b['block_id'], in_longest, b['height']))
if in_longest > 0:
store.sql("""
UPDATE chain
SET chain_last_block_id = ?
WHERE chain_id = ?""", (top['block_id'], chain_id))
if store.use_firstbits and b['height'] is not None:
(addr_vers,) = store.selectrow("""
SELECT chain_address_version
FROM chain
WHERE chain_id = ?""", (chain_id,))
store.do_vers_firstbits(addr_vers, b['block_id'])
def offer_existing_block(store, hash, chain_id):
block_row = store.selectrow("""
SELECT block_id, block_height, block_chain_work,
block_nTime, block_total_seconds,
block_total_satoshis, block_satoshi_seconds,
block_total_ss
FROM block
WHERE block_hash = ?
""", (store.hashin(hash),))
if not block_row:
return False
if chain_id is None:
return True
# Block header already seen. Don't import the block,
# but try to add it to the chain.
b = {
"block_id": block_row[0],
"height": block_row[1],
"chain_work": store.binout_int(block_row[2]),
"nTime": block_row[3],
"seconds": block_row[4],
"satoshis": block_row[5],
"ss": block_row[6],
"total_ss": block_row[7]}
if store.selectrow("""
SELECT 1
FROM chain_candidate
WHERE block_id = ?
AND chain_id = ?""",
(b['block_id'], chain_id)):
store.log.info("block %d already in chain %d",
b['block_id'], chain_id)
else:
if b['height'] == 0:
b['hashPrev'] = store.get_chain_by_id(chain_id).genesis_hash_prev
else:
b['hashPrev'] = 'dummy' # Fool adopt_orphans.
store.offer_block_to_chains(b, frozenset([chain_id]))
return True
def find_next_blocks(store, block_id):
ret = []
for row in store.selectall(
"SELECT next_block_id FROM block_next WHERE block_id = ?",
(block_id,)):
ret.append(row[0])
return ret
def find_chains_containing_block(store, block_id):
ret = []
for row in store.selectall(
"SELECT chain_id FROM chain_candidate WHERE block_id = ?",
(block_id,)):
ret.append(row[0])
return frozenset(ret)
def get_prev_block_id(store, block_id):
return store.selectrow(
"SELECT prev_block_id FROM block WHERE block_id = ?",
(block_id,))[0]
def disconnect_block(store, block_id, chain_id):
store.sql("""
UPDATE chain_candidate
SET in_longest = 0
WHERE block_id = ? AND chain_id = ?""",
(block_id, chain_id))
def connect_block(store, block_id, chain_id):
store.sql("""
UPDATE chain_candidate
SET in_longest = 1
WHERE block_id = ? AND chain_id = ?""",
(block_id, chain_id))
def lookup_txout(store, tx_hash, txout_pos):
row = store.selectrow("""
SELECT txout.txout_id, txout.txout_value
FROM txout, tx
WHERE txout.tx_id = tx.tx_id
AND tx.tx_hash = ?
AND txout.txout_pos = ?""",
(store.hashin(tx_hash), txout_pos))
return (None, None) if row is None else (row[0], int(row[1]))
def script_to_pubkey_id(store, chain, script):
"""Extract address and script type from transaction output script."""
script_type, data = chain.parse_txout_script(script)
if script_type in (Chain.SCRIPT_TYPE_ADDRESS, Chain.SCRIPT_TYPE_P2SH):
return store.pubkey_hash_to_id(data)
if script_type == Chain.SCRIPT_TYPE_PUBKEY:
return store.pubkey_to_id(chain, data)
if script_type == Chain.SCRIPT_TYPE_MULTISIG:
script_hash = chain.script_hash(script)
multisig_id = store._pubkey_id(script_hash, script)
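# A multisig script gets its own pubkey row keyed by the script hash;
# its member keys are linked through multisig_pubkey rows.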
if not store.selectrow("SELECT 1 FROM multisig_pubkey WHERE multisig_id = ?", (multisig_id,)):
for pubkey in set(data['pubkeys']):
pubkey_id = store.pubkey_to_id(chain, pubkey)
store.sql("""
INSERT INTO multisig_pubkey (multisig_id, pubkey_id)
VALUES (?, ?)""", (multisig_id, pubkey_id))
return multisig_id
if script_type == Chain.SCRIPT_TYPE_BURN:
return PUBKEY_ID_NETWORK_FEE
return None
def pubkey_hash_to_id(store, pubkey_hash):
return store._pubkey_id(pubkey_hash, None)
def pubkey_to_id(store, chain, pubkey):
pubkey_hash = chain.pubkey_hash(pubkey)
return store._pubkey_id(pubkey_hash, pubkey)
def _pubkey_id(store, pubkey_hash, pubkey):
dbhash = store.binin(pubkey_hash) # binin, not hashin for 160-bit
row = store.selectrow("""
SELECT pubkey_id
FROM pubkey
WHERE pubkey_hash = ?""", (dbhash,))
if row:
return row[0]
pubkey_id = store.new_id("pubkey")
if pubkey is not None and len(pubkey) > MAX_PUBKEY:
pubkey = None
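# Keys longer than MAX_PUBKEY are stored as NULL; the row is still
# identified by its 160-bit hash.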
store.sql("""
INSERT INTO pubkey (pubkey_id, pubkey_hash, pubkey)
VALUES (?, ?, ?)""",
(pubkey_id, dbhash, store.binin(pubkey)))
return pubkey_id
def flush(store):
if store.bytes_since_commit > 0:
store.commit()
store.log.debug("commit")
store.bytes_since_commit = 0
def imported_bytes(store, size):
store.bytes_since_commit += size
if store.bytes_since_commit >= store.commit_bytes:
store.flush()
def catch_up(store):
for dircfg in store.datadirs:
try:
loader = dircfg['loader'] or store.default_loader
if loader == "blkfile":
store.catch_up_dir(dircfg)
elif loader in ("rpc", "rpc,blkfile", "default"):
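# Try RPC first; unless the loader is strictly "rpc", fall back to
# reading block files when RPC fails.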
if not store.catch_up_rpc(dircfg):
if loader == "rpc":
raise Exception("RPC load failed")
store.log.debug("catch_up_rpc: abort")
store.catch_up_dir(dircfg)
else:
raise Exception("Unknown datadir loader: %s" % loader)
store.flush()
except Exception, e:
store.log.exception("Failed to catch up %s", dircfg)
store.rollback()
def catch_up_rpc(store, dircfg):
"""
Load new blocks using RPC. Requires a running *coind that supports
getblockhash, getblock, and getrawtransaction. Bitcoind v0.8
requires the txindex configuration option. Requires chain_id
in the datadir table.
"""
chain_id = dircfg['chain_id']
if chain_id is None:
store.log.debug("no chain_id")
return False
chain = store.chains_by.id[chain_id]
conffile = dircfg.get('conf') or chain.datadir_conf_file_name
conffile = os.path.join(dircfg['dirname'], conffile)
try:
conf = dict([line.strip().split("=", 1)
if "=" in line
else (line.strip(), True)
for line in open(conffile)
if line != "" and line[0] not in "#\r\n"])
except Exception, e:
store.log.debug("failed to load %s: %s", conffile, e)
return False
rpcuser = conf.get("rpcuser", "")
rpcpassword = conf["rpcpassword"]
rpcconnect = conf.get("rpcconnect", "127.0.0.1")
rpcport = conf.get("rpcport", chain.datadir_rpcport)
url = "http://" + rpcuser + ":" + rpcpassword + "@" + rpcconnect \
+ ":" + str(rpcport)
def rpc(func, *params):
store.rpclog.info("RPC>> %s %s", func, params)
ret = util.jsonrpc(url, func, *params)
if (store.rpclog.isEnabledFor(logging.INFO)):
store.rpclog.info("RPC<< %s",
re.sub(r'\[[^\]]{100,}\]', '[...]', str(ret)))
return ret
def get_blockhash(height):
try:
return rpc("getblockhash", height)
except util.JsonrpcException, e:
if e.code in (-1, -5, -8):
# Block number out of range...
# -1 is the legacy (pre-0.10) generic error code
# -8 (RPC_INVALID_PARAMETER) first appeared in bitcoind 0.10.x
# -5 (RPC_NOT_FOUND) has been suggested in #bitcoin-dev as more appropriate
return None
raise
(max_height,) = store.selectrow("""
SELECT block_height
FROM chain_candidate
WHERE chain_id = ?
ORDER BY block_height DESC
LIMIT 1""", (chain.id,))
height = 0 if max_height is None else int(max_height) + 1
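# Resume importing at the block after the highest one already recorded
# for this chain.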
def get_tx(rpc_tx_hash):
try:
rpc_tx_hex = rpc("getrawtransaction", rpc_tx_hash)
except util.JsonrpcException, e:
if e.code != -5: # -5: transaction not in index.
raise
if height != 0:
store.log.debug("RPC service lacks full txindex")
return None
# The genesis transaction is unavailable. This is
# normal.
import genesis_tx
rpc_tx_hex = genesis_tx.get(rpc_tx_hash)
if rpc_tx_hex is None:
store.log.debug("genesis transaction unavailable via RPC;"
" see import-tx in abe.conf")
return None
rpc_tx = rpc_tx_hex.decode('hex')
tx_hash = rpc_tx_hash.decode('hex')[::-1]
computed_tx_hash = chain.transaction_hash(rpc_tx)
if tx_hash != computed_tx_hash:
#raise InvalidBlock('transaction hash mismatch')
store.log.debug('transaction hash mismatch: %r != %r', tx_hash, computed_tx_hash)
tx = chain.parse_transaction(rpc_tx)
tx['hash'] = tx_hash
return tx
try:
# Get block hash at height, and at the same time, test
# bitcoind connectivity.
try:
next_hash = get_blockhash(height)
except util.JsonrpcException, e:
raise
except Exception, e:
# Connectivity failure.
store.log.debug("RPC failed: %s", e)
return False
# Find the first new block.
while height > 0:
hash = get_blockhash(height - 1)
if hash is not None and (1,) == store.selectrow("""
SELECT 1
FROM chain_candidate cc
JOIN block b ON (cc.block_id = b.block_id)
WHERE b.block_hash = ?
AND b.block_height IS NOT NULL
AND cc.chain_id = ?""", (
store.hashin_hex(str(hash)), chain.id)):
break
next_hash = hash
height -= 1
# Import new blocks.
rpc_hash = next_hash or get_blockhash(height)
while rpc_hash is not None:
hash = rpc_hash.decode('hex')[::-1]
if store.offer_existing_block(hash, chain.id):
rpc_hash = get_blockhash(height + 1)
else:
rpc_block = rpc("getblock", rpc_hash)
assert rpc_hash == rpc_block['hash']
prev_hash = \
rpc_block['previousblockhash'].decode('hex')[::-1] \
if 'previousblockhash' in rpc_block \
else chain.genesis_hash_prev
block = {
'hash': hash,
'version': int(rpc_block['version']),
'hashPrev': prev_hash,
'hashMerkleRoot':
rpc_block['merkleroot'].decode('hex')[::-1],
'nTime': int(rpc_block['time']),
'nBits': int(rpc_block['bits'], 16),
'nNonce': int(rpc_block['nonce']),
'transactions': [],
'size': int(rpc_block['size']),
'height': height,
}
if chain.block_header_hash(chain.serialize_block_header(
block)) != hash:
raise InvalidBlock('block hash mismatch')
for rpc_tx_hash in rpc_block['tx']:
tx = store.export_tx(tx_hash = str(rpc_tx_hash),
format = "binary")
if tx is None:
tx = get_tx(rpc_tx_hash)
if tx is None:
return False
block['transactions'].append(tx)
store.import_block(block, chain = chain)
store.imported_bytes(block['size'])
rpc_hash = rpc_block.get('nextblockhash')
height += 1
# Import the memory pool.
for rpc_tx_hash in rpc("getrawmempool"):
tx = get_tx(rpc_tx_hash)
if tx is None:
return False
# XXX Race condition in low isolation levels.
tx_id = store.tx_find_id_and_value(tx, False)
if tx_id is None:
tx_id = store.import_tx(tx, False, chain)
store.log.info("mempool tx %d", tx_id)
store.imported_bytes(tx['size'])
except util.JsonrpcMethodNotFound, e:
store.log.debug("bitcoind %s not supported", e.method)
return False
except InvalidBlock, e:
store.log.debug("RPC data not understood: %s", e)
return False
return True
# Load all blocks starting at the current file and offset.
def catch_up_dir(store, dircfg):
def open_blkfile(number):
store._refresh_dircfg(dircfg)
blkfile = {
'stream': BCDataStream.BCDataStream(),
'name': store.blkfile_name(dircfg, number),
'number': number
}
try:
file = open(blkfile['name'], "rb")
except IOError, e:
# Early bitcoind used blk0001.dat to blk9999.dat.
# Now it uses blocks/blk00000.dat to blocks/blk99999.dat.
# Abe starts by assuming the former scheme. If we don't
# find the expected file but do see blocks/blk00000.dat,
# switch to the new scheme. Record the switch by adding
# 100000 to each file number, so for example, 100123 means
# blocks/blk00123.dat but 123 still means blk0123.dat.
if blkfile['number'] > 9999 or e.errno != errno.ENOENT:
raise
new_number = 100000
blkfile['name'] = store.blkfile_name(dircfg, new_number)
file = open(blkfile['name'], "rb")
blkfile['number'] = new_number
try:
blkfile['stream'].map_file(file, 0)
except Exception:
# mmap can fail on an empty file, but empty files are okay.
file.seek(0, os.SEEK_END)
if file.tell() == 0:
blkfile['stream'].input = ""
blkfile['stream'].read_cursor = 0
else:
blkfile['stream'].map_file(file, 0)
finally:
file.close()
store.log.info("Opened %s", blkfile['name'])
return blkfile
def try_close_file(ds):
try:
ds.close_file()
except Exception, e:
store.log.info("BCDataStream: close_file: %s", e)
try:
blkfile = open_blkfile(dircfg['blkfile_number'])
except IOError, e:
store.log.warning("Skipping datadir %s: %s", dircfg['dirname'], e)
return
while True:
dircfg['blkfile_number'] = blkfile['number']
ds = blkfile['stream']
next_blkfile = None
try:
store.import_blkdat(dircfg, ds, blkfile['name'])
except Exception:
store.log.warning("Exception at %d" % ds.read_cursor)
try_close_file(ds)
raise
if next_blkfile is None:
# Try another file.
try:
next_blkfile = open_blkfile(dircfg['blkfile_number'] + 1)
except IOError, e:
if e.errno != errno.ENOENT:
raise
# No more block files.
return
except Exception, e:
if getattr(e, 'errno', None) == errno.ENOMEM:
# Assume 32-bit address space exhaustion.
store.log.warning(
"Cannot allocate memory for next blockfile: "
"skipping safety check")
try_close_file(ds)
blkfile = open_blkfile(dircfg['blkfile_number'] + 1)
dircfg['blkfile_offset'] = 0
continue
raise
finally:
if next_blkfile is None:
try_close_file(ds)
# Load any data written to the last file since we checked.
store.import_blkdat(dircfg, ds, blkfile['name'])
# Continue with the new file.
blkfile = next_blkfile
try_close_file(ds)
dircfg['blkfile_offset'] = 0
# Load all blocks from the given data stream.
def import_blkdat(store, dircfg, ds, filename="[unknown]"):
filenum = dircfg['blkfile_number']
ds.read_cursor = dircfg['blkfile_offset']
while filenum == dircfg['blkfile_number']:
if ds.read_cursor + 8 > len(ds.input):
break
offset = ds.read_cursor
magic = ds.read_bytes(4)
# Assume no real magic number starts with a NUL.
if magic[0] == "\0":
if filenum > 99999 and magic == "\0\0\0\0":
# As of Bitcoin 0.8, files often end with a NUL span.
ds.read_cursor = offset
break
# Skip NUL bytes at block end.
ds.read_cursor = offset
while ds.read_cursor < len(ds.input):
size = min(len(ds.input) - ds.read_cursor, 1000)
data = ds.read_bytes(size).lstrip("\0")
if (data != ""):
ds.read_cursor -= len(data)
break
store.log.info("Skipped %d NUL bytes at block end",
ds.read_cursor - offset)
continue
# Assume blocks obey the respective policy if they get here.
chain_id = dircfg['chain_id']
chain = store.chains_by.id.get(chain_id, None)
if chain is None:
chain = store.chains_by.magic.get(magic, None)
if chain is None:
store.log.warning(
"Chain not found for magic number %s in block file %s at"
" offset %d.", magic.encode('hex'), filename, offset)
not_magic = magic
# Read this file's initial magic number.
magic = ds.input[0:4]
if magic == not_magic:
ds.read_cursor = offset
break
store.log.info(
"Scanning for initial magic number %s.",
magic.encode('hex'))
ds.read_cursor = offset
offset = ds.input.find(magic, offset)
if offset == -1:
store.log.info("Magic number scan unsuccessful.")
break
store.log.info(
"Skipped %d bytes in block file %s at offset %d.",
offset - ds.read_cursor, filename, ds.read_cursor)
ds.read_cursor = offset
continue
length = ds.read_int32()
if ds.read_cursor + length > len(ds.input):
store.log.debug("incomplete block of length %d chain %d",
length, chain.id)
ds.read_cursor = offset
break
end = ds.read_cursor + length
hash = chain.ds_block_header_hash(ds)
# XXX should decode target and check hash against it to
# avoid loading garbage data. But not for merged-mined or
# CPU-mined chains that use different proof-of-work
# algorithms.
if not store.offer_existing_block(hash, chain.id):
b = chain.ds_parse_block(ds)
b["hash"] = hash
if (store.log.isEnabledFor(logging.DEBUG) and b["hashPrev"] == chain.genesis_hash_prev):
try:
store.log.debug("Chain %d genesis tx: %s", chain.id,
b['transactions'][0]['__data__'].encode('hex'))
except Exception:
pass
store.import_block(b, chain = chain)
if ds.read_cursor != end:
store.log.debug("Skipped %d bytes at block end",
end - ds.read_cursor)
ds.read_cursor = end
store.bytes_since_commit += length
if store.bytes_since_commit >= store.commit_bytes:
store.save_blkfile_offset(dircfg, ds.read_cursor)
store.flush()
store._refresh_dircfg(dircfg)
if ds.read_cursor != dircfg['blkfile_offset']:
store.save_blkfile_offset(dircfg, ds.read_cursor)
def blkfile_name(store, dircfg, number=None):
if number is None:
number = dircfg['blkfile_number']
if number > 9999:
return os.path.join(dircfg['dirname'], "blocks", "blk%05d.dat"
% (number - 100000,))
return os.path.join(dircfg['dirname'], "blk%04d.dat" % (number,))
def save_blkfile_offset(store, dircfg, offset):
store.sql("""
UPDATE datadir
SET blkfile_number = ?,
blkfile_offset = ?
WHERE datadir_id = ?""",
(dircfg['blkfile_number'], store.intin(offset),
dircfg['id']))
if store.rowcount() == 0:
store.sql("""
INSERT INTO datadir (datadir_id, dirname, blkfile_number,
blkfile_offset, chain_id)
VALUES (?, ?, ?, ?, ?)""",
(dircfg['id'], dircfg['dirname'],
dircfg['blkfile_number'],
store.intin(offset), dircfg['chain_id']))
dircfg['blkfile_offset'] = offset
def _refresh_dircfg(store, dircfg):
row = store.selectrow("""
SELECT blkfile_number, blkfile_offset
FROM datadir
WHERE dirname = ?""", (dircfg['dirname'],))
if row:
number, offset = map(int, row)
if (number > dircfg['blkfile_number'] or
(number == dircfg['blkfile_number'] and
offset > dircfg['blkfile_offset'])):
dircfg['blkfile_number'] = number
dircfg['blkfile_offset'] = offset
def get_block_number(store, chain_id):
(height,) = store.selectrow("""
SELECT block_height
FROM chain_candidate
WHERE chain_id = ?
AND in_longest = 1
ORDER BY block_height DESC
LIMIT 1""", (chain_id,))
return -1 if height is None else int(height)
def get_target(store, chain_id):
rows = store.selectall("""
SELECT b.block_nBits
FROM block b
JOIN chain c ON (b.block_id = c.chain_last_block_id)
WHERE c.chain_id = ?""", (chain_id,))
return util.calculate_target(int(rows[0][0])) if rows else None
def get_received_and_last_block_id(store, chain_id, pubkey_hash,
block_height = None):
sql = """
SELECT COALESCE(value_sum, 0), c.chain_last_block_id
FROM chain c LEFT JOIN (
SELECT cc.chain_id, SUM(txout.txout_value) value_sum
FROM pubkey
JOIN txout ON (txout.pubkey_id = pubkey.pubkey_id)
JOIN block_tx ON (block_tx.tx_id = txout.tx_id)
JOIN block b ON (b.block_id = block_tx.block_id)
JOIN chain_candidate cc ON (cc.block_id = b.block_id)
WHERE
pubkey.pubkey_hash = ? AND
cc.chain_id = ? AND
cc.in_longest = 1""" + (
"" if block_height is None else """ AND
cc.block_height <= ?""") + """
GROUP BY cc.chain_id
) a ON (c.chain_id = a.chain_id)
WHERE c.chain_id = ?"""
dbhash = store.binin(pubkey_hash)
return store.selectrow(sql,
(dbhash, chain_id, chain_id)
if block_height is None else
(dbhash, chain_id, block_height, chain_id))
def get_received(store, chain_id, pubkey_hash, block_height = None):
return store.get_received_and_last_block_id(
chain_id, pubkey_hash, block_height)[0]
def get_sent_and_last_block_id(store, chain_id, pubkey_hash,
block_height = None):
sql = """
SELECT COALESCE(value_sum, 0), c.chain_last_block_id
FROM chain c LEFT JOIN (
SELECT cc.chain_id, SUM(txout.txout_value) value_sum
FROM pubkey
JOIN txout ON (txout.pubkey_id = pubkey.pubkey_id)
JOIN txin ON (txin.txout_id = txout.txout_id)
JOIN block_tx ON (block_tx.tx_id = txin.tx_id)
JOIN block b ON (b.block_id = block_tx.block_id)
JOIN chain_candidate cc ON (cc.block_id = b.block_id)
WHERE
pubkey.pubkey_hash = ? AND
cc.chain_id = ? AND
cc.in_longest = 1""" + (
"" if block_height is None else """ AND
cc.block_height <= ?""") + """
GROUP BY cc.chain_id
) a ON (c.chain_id = a.chain_id)
WHERE c.chain_id = ?"""
dbhash = store.binin(pubkey_hash)
return store.selectrow(sql,
(dbhash, chain_id, chain_id)
if block_height is None else
(dbhash, chain_id, block_height, chain_id))
def get_sent(store, chain_id, pubkey_hash, block_height = None):
return store.get_sent_and_last_block_id(
chain_id, pubkey_hash, block_height)[0]
def get_balance(store, chain_id, pubkey_hash):
sent, last_block_id = store.get_sent_and_last_block_id(
chain_id, pubkey_hash)
received, last_block_id_2 = store.get_received_and_last_block_id(
chain_id, pubkey_hash)
# Deal with the race condition.
for i in xrange(2):
if last_block_id == last_block_id_2:
break
store.log.debug("Requerying balance: %d != %d",
last_block_id, last_block_id_2)
received, last_block_id_2 = store.get_received_and_last_block_id(
chain_id, pubkey_hash, store.get_block_height(last_block_id))
if last_block_id == last_block_id_2:
break
store.log.info("Balance query affected by reorg? %d != %d",
last_block_id, last_block_id_2)
sent, last_block_id = store.get_sent_and_last_block_id(
chain_id, pubkey_hash, store.get_block_height(last_block_id_2))
if last_block_id != last_block_id_2:
store.log.warning("Balance query failed due to loader activity.")
return None
return received - sent
def firstbits_full(store, version, hash):
"""
Return the address in lowercase. An initial substring of this
will become the firstbits.
"""
return util.hash_to_address(version, hash).lower()
def insert_firstbits(store, pubkey_id, block_id, addr_vers, fb):
store.sql("""
INSERT INTO abe_firstbits (
pubkey_id, block_id, address_version, firstbits
)
VALUES (?, ?, ?, ?)""",
(pubkey_id, block_id, addr_vers, fb))
def cant_do_firstbits(store, addr_vers, block_id, pubkey_id):
store.log.info(
"No firstbits for pubkey_id %d, block_id %d, version '%s'",
pubkey_id, block_id, store.binout_hex(addr_vers))
store.insert_firstbits(pubkey_id, block_id, addr_vers, '')
def do_firstbits(store, addr_vers, block_id, fb, ids, full):
"""
Insert the firstbits that start with fb using addr_vers and
are first seen in block_id. Return the count of rows
inserted.
fb -- string, not a firstbits using addr_vers in any ancestor
of block_id
ids -- set of ids of all pubkeys first seen in block_id whose
firstbits start with fb
full -- map from pubkey_id to full firstbits
"""
if len(ids) <= 1:
for pubkey_id in ids:
store.insert_firstbits(pubkey_id, block_id, addr_vers, fb)
return len(ids)
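# Several pubkeys collide on the prefix fb: partition them by their next
# character and recurse until each extended prefix is unique.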
pubkeys = {}
for pubkey_id in ids:
s = full[pubkey_id]
if s == fb:
store.cant_do_firstbits(addr_vers, block_id, pubkey_id)
continue
fb1 = fb + s[len(fb)]
ids1 = pubkeys.get(fb1)
if ids1 is None:
ids1 = set()
pubkeys[fb1] = ids1
ids1.add(pubkey_id)
count = 0
for fb1, ids1 in pubkeys.iteritems():
count += store.do_firstbits(addr_vers, block_id, fb1, ids1, full)
return count
def do_vers_firstbits(store, addr_vers, block_id):
"""
Create new firstbits records for block and addr_vers. All
ancestor blocks must have their firstbits already recorded.
"""
address_version = store.binout(addr_vers)
pubkeys = {} # firstbits to set of pubkey_id
full = {} # pubkey_id to full firstbits, or None if old
for pubkey_id, pubkey_hash, oblock_id in store.selectall("""
SELECT DISTINCT
pubkey.pubkey_id,
pubkey.pubkey_hash,
fb.block_id
FROM block b
JOIN block_tx bt ON (b.block_id = bt.block_id)
JOIN txout ON (bt.tx_id = txout.tx_id)
JOIN pubkey ON (txout.pubkey_id = pubkey.pubkey_id)
LEFT JOIN abe_firstbits fb ON (
fb.address_version = ?
AND fb.pubkey_id = pubkey.pubkey_id)
WHERE b.block_id = ?""", (addr_vers, block_id)):
pubkey_id = int(pubkey_id)
if (oblock_id is not None and
store.is_descended_from(block_id, int(oblock_id))):
full[pubkey_id] = None
if pubkey_id in full:
continue
full[pubkey_id] = store.firstbits_full(address_version,
store.binout(pubkey_hash))
for pubkey_id, s in full.iteritems():
if s is None:
continue
# This is the pubkey's first appearance in the chain.
# Find the longest match among earlier firstbits.
longest, longest_id = 0, None
substrs = [s[0:(i+1)] for i in xrange(len(s))]
for ancestor_id, fblen, o_pubkey_id in store.selectall("""
SELECT block_id, LENGTH(firstbits), pubkey_id
FROM abe_firstbits fb
WHERE address_version = ?
AND firstbits IN (?""" + (",?" * (len(s)-1)) + """
)""", tuple([addr_vers] + substrs)):
if fblen > longest and store.is_descended_from(
block_id, int(ancestor_id)):
longest, longest_id = fblen, o_pubkey_id
# If necessary, extend the new fb to distinguish it from
# the longest match.
if longest_id is not None:
(o_hash,) = store.selectrow(
"SELECT pubkey_hash FROM pubkey WHERE pubkey_id = ?",
(longest_id,))
o_fb = store.firstbits_full(
address_version, store.binout(o_hash))
max_len = min(len(s), len(o_fb))
while longest < max_len and s[longest] == o_fb[longest]:
longest += 1
if longest == len(s):
store.cant_do_firstbits(addr_vers, block_id, pubkey_id)
continue
fb = s[0 : (longest + 1)]
ids = pubkeys.get(fb)
if ids is None:
ids = set()
pubkeys[fb] = ids
ids.add(pubkey_id)
count = 0
for fb, ids in pubkeys.iteritems():
count += store.do_firstbits(addr_vers, block_id, fb, ids, full)
return count
def firstbits_to_addresses(store, fb, chain_id=None):
dbfb = fb.lower()
ret = []
bind = [fb[0:(i+1)] for i in xrange(len(fb))]
if chain_id is not None:
bind.append(chain_id)
for dbhash, vers in store.selectall("""
SELECT pubkey.pubkey_hash,
fb.address_version
FROM abe_firstbits fb
JOIN pubkey ON (fb.pubkey_id = pubkey.pubkey_id)
JOIN chain_candidate cc ON (cc.block_id = fb.block_id)
WHERE fb.firstbits IN (?""" + (",?" * (len(fb)-1)) + """)""" + ( \
"" if chain_id is None else """
AND cc.chain_id = ?"""), tuple(bind)):
address = util.hash_to_address(store.binout(vers),
store.binout(dbhash))
if address.lower().startswith(dbfb):
ret.append(address)
if len(ret) == 0 or (len(ret) > 1 and fb in ret):
ret = [fb] # assume exact address match
return ret
def get_firstbits(store, address_version=None, db_pubkey_hash=None,
chain_id=None):
"""
Return the address's firstbits: the longest of multiple
firstbits values if chain_id is not given, None if the address
has not appeared, or the empty string if the address has
appeared but has no firstbits.
"""
vers, dbhash = store.binin(address_version), db_pubkey_hash
rows = store.selectall("""
SELECT fb.firstbits
FROM abe_firstbits fb
JOIN pubkey ON (fb.pubkey_id = pubkey.pubkey_id)
JOIN chain_candidate cc ON (fb.block_id = cc.block_id)
WHERE cc.in_longest = 1
AND fb.address_version = ?
AND pubkey.pubkey_hash = ?""" + (
"" if chain_id is None else """
AND cc.chain_id = ?"""),
(vers, dbhash) if chain_id is None else
(vers, dbhash, chain_id))
if not rows:
return None
ret = ""
for (fb,) in rows:
if len(fb) > len(ret):
ret = fb
return ret
def new(args):
return DataStore(args)
|
asimshankar/tensorflow | refs/heads/master | tensorflow/tools/compatibility/reorders_v2.py | 2 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
This file should be updated whenever a function is added to
self.reordered_function_names in tf_upgrade_v2.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
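# Each entry maps a function to the positional order of its TF 1.x
# arguments, letting the upgrade script rewrite positional calls as keyword
# calls; e.g. tf.argmax(t, 1) can become tf.argmax(input=t, axis=1).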
reorders = {
'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.batch_to_space': ['input', 'crops', 'block_size', 'name'],
'tf.boolean_mask': ['tensor', 'mask', 'name', 'axis'],
'tf.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.convert_to_tensor': ['value', 'dtype', 'name', 'preferred_dtype'],
'tf.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.feature_column.categorical_column_with_vocabulary_file': ['key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'default_value', 'dtype'],
'tf.io.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
'tf.io.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.io.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.io.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.linalg.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.math.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
'tf.math.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
'tf.math.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.math.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.math.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.nn.convolution': ['input', 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format'],
'tf.nn.crelu': ['features', 'name', 'axis'],
'tf.nn.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.depthwise_conv2d': ['input', 'filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
'tf.nn.embedding_lookup': ['params', 'ids', 'partition_strategy', 'name', 'validate_indices', 'max_norm'],
'tf.nn.embedding_lookup_sparse': ['params', 'sp_ids', 'sp_weights', 'partition_strategy', 'name', 'combiner', 'max_norm'],
'tf.nn.in_top_k': ['predictions', 'targets', 'k', 'name'],
'tf.nn.moments': ['x', 'axes', 'shift', 'name', 'keep_dims'],
'tf.nn.pool': ['input', 'window_shape', 'pooling_type', 'padding', 'dilation_rate', 'strides', 'name', 'data_format'],
'tf.nn.separable_conv2d': ['input', 'depthwise_filter', 'pointwise_filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
'tf.nn.space_to_batch': ['input', 'paddings', 'block_size', 'name'],
'tf.nn.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.nn.weighted_moments': ['x', 'axes', 'frequency_weights', 'name', 'keep_dims'],
'tf.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
'tf.pad': ['tensor', 'paddings', 'mode', 'name', 'constant_values'],
'tf.parse_example': ['serialized', 'features', 'name', 'example_names'],
'tf.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
'tf.quantize_v2': ['input', 'min_range', 'max_range', 'T', 'mode', 'name', 'round_mode'],
'tf.random.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
'tf.random.poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.random_poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
'tf.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
'tf.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
'tf.reverse_sequence': ['input', 'seq_lengths', 'seq_axis', 'batch_axis', 'name', 'seq_dim', 'batch_dim'],
'tf.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
'tf.serialize_sparse': ['sp_input', 'name', 'out_type'],
'tf.shape': ['input', 'name', 'out_type'],
'tf.size': ['input', 'name', 'out_type'],
'tf.space_to_batch': ['input', 'paddings', 'block_size', 'name'],
'tf.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
'tf.sparse.add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse.concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
'tf.sparse.reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse.segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse.split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.sparse_add': ['a', 'b', 'threshold', 'thresh'],
'tf.sparse_concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
'tf.sparse_matmul': ['a', 'b', 'transpose_a', 'transpose_b', 'a_is_sparse', 'b_is_sparse', 'name'],
'tf.sparse_reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
'tf.sparse_segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
'tf.sparse_split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
'tf.strings.length': ['input', 'name', 'unit'],
'tf.strings.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
'tf.strings.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.substr': ['input', 'pos', 'len', 'name', 'unit'],
'tf.transpose': ['a', 'perm', 'name', 'conjugate'],
'tf.tuple': ['tensors', 'name', 'control_inputs'],
'tf.while_loop': ['cond', 'body', 'loop_vars', 'shape_invariants', 'parallel_iterations', 'back_prop', 'swap_memory', 'name', 'maximum_iterations', 'return_same_structure']
}
|
xuxiao19910803/edx | refs/heads/master | lms/djangoapps/courseware/tests/test_lti_integration.py | 114 | """LTI integration tests"""
from collections import OrderedDict
import json
import mock
from nose.plugins.attrib import attr
import oauthlib
import urllib
from django.conf import settings
from django.core.urlresolvers import reverse
from courseware.tests import BaseTestXmodule
from courseware.views import get_course_lti_endpoints
from lms.djangoapps.lms_xblock.runtime import quote_slashes
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDENT_VIEW
@attr('shard_1')
class TestLTI(BaseTestXmodule):
"""
Integration test for lti xmodule.
It checks the overall code by ensuring that the context passed to the
template is correct. As part of that, it checks OAuth signature generation
by mocking the signing function of the `oauthlib` library.
"""
CATEGORY = "lti"
def setUp(self):
"""
Mock oauth1 signing of requests library for testing.
"""
super(TestLTI, self).setUp()
mocked_nonce = u'135685044251684026041377608307'
mocked_timestamp = u'1234567890'
mocked_signature_after_sign = u'my_signature%3D'
mocked_decoded_signature = u'my_signature='
# Note: this course_id is actually a course_key
context_id = self.item_descriptor.course_id.to_deprecated_string()
user_id = unicode(self.item_descriptor.xmodule_runtime.anonymous_student_id)
hostname = self.item_descriptor.xmodule_runtime.hostname
resource_link_id = unicode(urllib.quote('{}-{}'.format(hostname, self.item_descriptor.location.html_id())))
sourcedId = "{context}:{resource_link}:{user_id}".format(
context=urllib.quote(context_id),
resource_link=resource_link_id,
user_id=user_id
)
self.correct_headers = {
u'user_id': user_id,
u'oauth_callback': u'about:blank',
u'launch_presentation_return_url': '',
u'lti_message_type': u'basic-lti-launch-request',
u'lti_version': 'LTI-1p0',
u'roles': u'Student',
u'context_id': context_id,
u'resource_link_id': resource_link_id,
u'lis_result_sourcedid': sourcedId,
u'oauth_nonce': mocked_nonce,
u'oauth_timestamp': mocked_timestamp,
u'oauth_consumer_key': u'',
u'oauth_signature_method': u'HMAC-SHA1',
u'oauth_version': u'1.0',
u'oauth_signature': mocked_decoded_signature
}
saved_sign = oauthlib.oauth1.Client.sign
self.expected_context = {
'display_name': self.item_descriptor.display_name,
'input_fields': self.correct_headers,
'element_class': self.item_descriptor.category,
'element_id': self.item_descriptor.location.html_id(),
'launch_url': 'http://www.example.com', # default value
'open_in_a_new_page': True,
'form_url': self.item_descriptor.xmodule_runtime.handler_url(self.item_descriptor,
'preview_handler').rstrip('/?'),
'hide_launch': False,
'has_score': False,
'module_score': None,
'comment': u'',
'weight': 1.0,
'ask_to_send_username': self.item_descriptor.ask_to_send_username,
'ask_to_send_email': self.item_descriptor.ask_to_send_email,
'description': self.item_descriptor.description,
'button_text': self.item_descriptor.button_text,
'accept_grades_past_due': self.item_descriptor.accept_grades_past_due,
}
def mocked_sign(self, *args, **kwargs):
"""
Mocked oauth1 sign function.
"""
# self is <oauthlib.oauth1.rfc5849.Client object> here:
__, headers, __ = saved_sign(self, *args, **kwargs)
# we should replace the nonce, timestamp and signature in the headers:
old = headers[u'Authorization']
old_parsed = OrderedDict([param.strip().replace('"', '').split('=') for param in old.split(',')])
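# Splitting on '=' leaves the 'OAuth ' scheme prefix attached to the first
# key (hence u'OAuth oauth_nonce' below); it is preserved when the header
# is rebuilt.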
old_parsed[u'OAuth oauth_nonce'] = mocked_nonce
old_parsed[u'oauth_timestamp'] = mocked_timestamp
old_parsed[u'oauth_signature'] = mocked_signature_after_sign
headers[u'Authorization'] = ', '.join([k + '="' + v + '"' for k, v in old_parsed.items()])
return None, headers, None
patcher = mock.patch.object(oauthlib.oauth1.Client, "sign", mocked_sign)
patcher.start()
self.addCleanup(patcher.stop)
def test_lti_constructor(self):
generated_content = self.item_descriptor.render(STUDENT_VIEW).content
expected_content = self.runtime.render_template('lti.html', self.expected_context)
self.assertEqual(generated_content, expected_content)
def test_lti_preview_handler(self):
generated_content = self.item_descriptor.preview_handler(None, None).body
expected_content = self.runtime.render_template('lti_form.html', self.expected_context)
self.assertEqual(generated_content, expected_content)
@attr('shard_1')
class TestLTIModuleListing(ModuleStoreTestCase):
"""
A test for the REST endpoint that lists LTI modules in a course.
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
def setUp(self):
"""Create course, 2 chapters, 2 sections"""
super(TestLTIModuleListing, self).setUp()
self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
self.chapter1 = ItemFactory.create(
parent_location=self.course.location,
display_name="chapter1",
category='chapter')
self.section1 = ItemFactory.create(
parent_location=self.chapter1.location,
display_name="section1",
category='sequential')
self.chapter2 = ItemFactory.create(
parent_location=self.course.location,
display_name="chapter2",
category='chapter')
self.section2 = ItemFactory.create(
parent_location=self.chapter2.location,
display_name="section2",
category='sequential')
# creates one draft and one published lti module, in different sections
self.lti_published = ItemFactory.create(
parent_location=self.section1.location,
display_name="lti published",
category="lti",
location=self.course.id.make_usage_key('lti', 'lti_published'),
)
self.lti_draft = ItemFactory.create(
parent_location=self.section2.location,
display_name="lti draft",
category="lti",
location=self.course.id.make_usage_key('lti', 'lti_draft'),
publish_item=False,
)
def expected_handler_url(self, handler):
"""convenience method to get the reversed handler urls"""
return "https://{}{}".format(settings.SITE_NAME, reverse(
'courseware.module_render.handle_xblock_callback_noauth',
args=[
self.course.id.to_deprecated_string(),
quote_slashes(unicode(self.lti_published.scope_ids.usage_id.to_deprecated_string()).encode('utf-8')),
handler
]
))
def test_lti_rest_bad_course(self):
"""Tests what happens when the lti listing rest endpoint gets a bad course_id"""
bad_ids = [u"sf", u"dne/dne/dne", u"fo/ey/\\u5305"]
for bad_course_id in bad_ids:
lti_rest_endpoints_url = 'courses/{}/lti_rest_endpoints/'.format(bad_course_id)
response = self.client.get(lti_rest_endpoints_url)
self.assertEqual(404, response.status_code)
def test_lti_rest_listing(self):
"""tests that the draft lti module is part of the endpoint response"""
request = mock.Mock()
request.method = 'GET'
response = get_course_lti_endpoints(request, course_id=self.course.id.to_deprecated_string())
self.assertEqual(200, response.status_code)
self.assertEqual('application/json', response['Content-Type'])
expected = {
"lti_1_1_result_service_xml_endpoint": self.expected_handler_url('grade_handler'),
"lti_2_0_result_service_json_endpoint":
self.expected_handler_url('lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
"display_name": self.lti_published.display_name,
}
self.assertEqual([expected], json.loads(response.content))
def test_lti_rest_non_get(self):
"""tests that the endpoint returns 404 when hit with NON-get"""
DISALLOWED_METHODS = ("POST", "PUT", "DELETE", "HEAD", "OPTIONS") # pylint: disable=invalid-name
for method in DISALLOWED_METHODS:
request = mock.Mock()
request.method = method
response = get_course_lti_endpoints(request, self.course.id.to_deprecated_string())
self.assertEqual(405, response.status_code)
|
amferraz/download4chan | refs/heads/master | download4chan/download4chan/pipelines.py | 1 | # coding: utf-8
from scrapy.contrib.pipeline.images import ImagesPipeline
class Image4ChanDownloadPipeline(ImagesPipeline):
def image_key(self, url):
image_guid = url.split('/')[-1]
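# image_guid keeps the source filename, extension included, so a source
# '1234.png' is stored under 'full/1234.png.jpg'.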
return 'full/%s.jpg' % (image_guid)
|
lahosken/pants | refs/heads/master | testprojects/tests/python/pants/timeout/test_exceeds_timeout.py | 12 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import time
def test_within_timeout():
time.sleep(0.1)
def test_exceeds_timeout():
time.sleep(120)
|
patrickwestphal/owlapy | refs/heads/master | owlapy/model/owlontologyid.py | 1 | class OWLOntologyID(object):
"""TODO: implement"""
def __init__(self, ontology_iri=None, version_iri=None):
"""
:param ontology_iri: an owlapy.model.IRI object
:param version_iri: an owlapy.model.IRI object
"""
# Minimal fill-in: record the IRIs; richer semantics are still TODO.
self.ontology_iri = ontology_iri
self.version_iri = version_iri |
little-dude/monolithe | refs/heads/master | monolithe/generators/sdkgenerator.py | 2 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import os
import shutil
from monolithe.lib import Printer
from monolithe.generators.lib import Generator
from monolithe.generators.managers import MainManager, CLIManager, VanillaManager
from .sdkapiversiongenerator import SDKAPIVersionGenerator
class SDKGenerator(Generator):
def cleanup(self):
output = self.config.get_option("output", "transformer")
language = self.config.language
overrides_path = "%s/%s/__overrides" % (output, language)
if os.path.exists(overrides_path):
shutil.rmtree(overrides_path)
attrs_defaults_path = "%s/%s/__attributes_defaults" % (output, language)
if os.path.exists(attrs_defaults_path):
shutil.rmtree(attrs_defaults_path)
code_header_path = "%s/%s/__code_header" % (output, language)
if os.path.exists(code_header_path):
os.remove(code_header_path)
def generate(self, specification_info):
user_vanilla = self.config.get_option("user_vanilla", "transformer")
output = self.config.get_option("output", "transformer")
name = self.config.get_option("name", "transformer")
lang = self.config.language
if not os.path.exists(os.path.join(output, lang)):
os.makedirs(os.path.join(output, lang))
vanilla_manager = VanillaManager(monolithe_config=self.config)
vanilla_manager.execute(output_path="%s/%s" % (output, lang))
self.install_user_vanilla(user_vanilla_path=user_vanilla, output_path="%s/%s" % (output, lang))
version_generator = SDKAPIVersionGenerator(self.config)
apiversions = []
for info in specification_info:
Printer.log("transforming specifications into %s for version %s..." % (lang, info["api"]["version"]))
apiversions.append(info["api"]["version"])
version_generator.generate(specification_info=specification_info)
Printer.log("assembling...")
manager = MainManager(monolithe_config=self.config)
manager.execute(apiversions=apiversions)
cli_manager = CLIManager(monolithe_config=self.config)
cli_manager.execute()
self.cleanup()
Printer.success("%s generation complete and available in \"%s/%s\"" % (name, output, self.config.language))
|
raptorz/userga | refs/heads/master | config.py | 1 | # -*- coding: utf-8 -*-
"""
default config file
:copyright: 20160204 by raptor.zh@gmail.com.
"""
#from __future__ import unicode_literals
import sys
PY3=sys.version>"3"
from os.path import dirname, abspath, expanduser, join as joinpath
import json
import logging
logger = logging.getLogger(__name__)
config_default = {
"db_url": "sqlite:///userga.dat",
"web_path": "userga",
"web_addr": "127.0.0.1",
"web_port": 8001,
"smtp_server": "",
"smtp_port": 25,
"smtp_user": "",
"smtp_pass": "",
"debug": True,
}
def get_fullname(*args):
root = dirname(abspath(__file__))
return joinpath(root, joinpath(*args)) if len(args) > 0 else root
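# e.g. get_fullname("static", "app.js") -> "<project root>/static/app.js";
# with no arguments it returns the project root itself.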
def uniencode(s, coding="utf-8"):
# Encode only text; bytes (py2 str) pass through unchanged.
return s.encode(coding) if s and not isinstance(s, bytes) else s
def unidecode(s, coding="utf-8"):
# Decode only bytes; already-decoded text passes through unchanged.
return s.decode(coding) if s and isinstance(s, bytes) else s
try:
with open(get_fullname("config.json"), "r") as f:
config = json.loads(f.read())
config_default.update(config)
config = config_default
except IOError:
config = config_default
|
maljac/odoomrp-wip | refs/heads/8.0 | purchase_packaging_info/__init__.py | 379 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import models
|
vongochung/buiquocviet | refs/heads/master | django/forms/widgets.py | 73 | """
HTML Widget classes
"""
from __future__ import absolute_import
import copy
import datetime
from itertools import chain
from urlparse import urljoin
from django.conf import settings
from django.forms.util import flatatt, to_current_timezone
from django.utils.datastructures import MultiValueDict, MergeDict
from django.utils.html import escape, conditional_escape
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.encoding import StrAndUnicode, force_unicode
from django.utils.safestring import mark_safe
from django.utils import datetime_safe, formats
__all__ = (
'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'PasswordInput',
'HiddenInput', 'MultipleHiddenInput', 'ClearableFileInput',
'FileInput', 'DateInput', 'DateTimeInput', 'TimeInput', 'Textarea', 'CheckboxInput',
'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget',
'SplitDateTimeWidget',
)
MEDIA_TYPES = ('css','js')
class Media(StrAndUnicode):
def __init__(self, media=None, **kwargs):
if media:
media_attrs = media.__dict__
else:
media_attrs = kwargs
self._css = {}
self._js = []
for name in MEDIA_TYPES:
getattr(self, 'add_' + name)(media_attrs.get(name, None))
# Any leftover attributes must be invalid.
# if media_attrs != {}:
# raise TypeError("'class Media' has invalid attribute(s): %s" % ','.join(media_attrs.keys()))
def __unicode__(self):
return self.render()
def render(self):
return mark_safe(u'\n'.join(chain(*[getattr(self, 'render_' + name)() for name in MEDIA_TYPES])))
def render_js(self):
return [u'<script type="text/javascript" src="%s"></script>' % self.absolute_path(path) for path in self._js]
def render_css(self):
# To keep rendering order consistent, we can't just iterate over items().
# We need to sort the keys, and iterate over the sorted list.
media = self._css.keys()
media.sort()
return chain(*[
[u'<link href="%s" type="text/css" media="%s" rel="stylesheet" />' % (self.absolute_path(path), medium)
for path in self._css[medium]]
for medium in media])
def absolute_path(self, path, prefix=None):
if path.startswith(u'http://') or path.startswith(u'https://') or path.startswith(u'/'):
return path
if prefix is None:
if settings.STATIC_URL is None:
# backwards compatibility
prefix = settings.MEDIA_URL
else:
prefix = settings.STATIC_URL
return urljoin(prefix, path)
def __getitem__(self, name):
"Returns a Media object that only contains media of the given type"
if name in MEDIA_TYPES:
return Media(**{str(name): getattr(self, '_' + name)})
raise KeyError('Unknown media type "%s"' % name)
def add_js(self, data):
if data:
for path in data:
if path not in self._js:
self._js.append(path)
def add_css(self, data):
if data:
for medium, paths in data.items():
for path in paths:
if not self._css.get(medium) or path not in self._css[medium]:
self._css.setdefault(medium, []).append(path)
def __add__(self, other):
combined = Media()
for name in MEDIA_TYPES:
getattr(combined, 'add_' + name)(getattr(self, '_' + name, None))
getattr(combined, 'add_' + name)(getattr(other, '_' + name, None))
return combined
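# Illustrative declaration (asset paths are hypothetical): widgets define a
# nested Media class, and MediaDefiningClass below turns it into this
# Media type via the media property:
#
# class CalendarWidget(TextInput):
# class Media:
# css = {'all': ('calendar.css',)}
# js = ('calendar.js',)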
def media_property(cls):
def _media(self):
# Get the media property of the superclass, if it exists
if hasattr(super(cls, self), 'media'):
base = super(cls, self).media
else:
base = Media()
# Get the media definition for this class
definition = getattr(cls, 'Media', None)
if definition:
extend = getattr(definition, 'extend', True)
if extend:
if extend == True:
m = base
else:
m = Media()
for medium in extend:
m = m + base[medium]
return m + Media(definition)
else:
return Media(definition)
else:
return base
return property(_media)
class MediaDefiningClass(type):
"Metaclass for classes that can have media definitions"
def __new__(cls, name, bases, attrs):
new_class = super(MediaDefiningClass, cls).__new__(cls, name, bases,
attrs)
if 'media' not in attrs:
new_class.media = media_property(new_class)
return new_class
class SubWidget(StrAndUnicode):
"""
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __unicode__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
class Widget(object):
__metaclass__ = MediaDefiningClass
is_hidden = False # Determines whether this corresponds to an <input type="hidden">.
needs_multipart_form = False # Determines whether this widget needs a multipart form.
is_localized = False
is_required = False
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
memo[id(self)] = obj
return obj
def subwidgets(self, name, value, attrs=None, choices=()):
"""
Yields all "subwidgets" of this widget. Used only by RadioSelect to
allow template access to individual <input type="radio"> buttons.
Arguments are the same as for render().
"""
yield SubWidget(self, name, value, attrs, choices)
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, files, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name, None)
def _has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
        # For purposes of seeing whether something has changed, None is
        # the same as an empty string: if the data or initial value we get
        # is None, replace it with u''.
if data is None:
data_value = u''
else:
data_value = data
if initial is None:
initial_value = u''
else:
initial_value = initial
if force_unicode(initial_value) != force_unicode(data_value):
return True
return False
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def _format_value(self, value):
if self.is_localized:
return formats.localize_input(value)
return value
def render(self, name, value, attrs=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(self._format_value(value))
return mark_safe(u'<input%s />' % flatatt(final_attrs))
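# --- Illustrative sketch (not part of the original module) ---
# A concrete Input subclass only needs to set input_type; render() does the
# rest. EmailInput and the field name below are hypothetical.
def _input_render_example():
    class EmailInput(Input):
        input_type = 'email'
    # Renders roughly: <input type="email" name="contact" value="a@b.com" />
    return EmailInput().render('contact', u'a@b.com')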
class TextInput(Input):
input_type = 'text'
class PasswordInput(Input):
input_type = 'password'
def __init__(self, attrs=None, render_value=False):
super(PasswordInput, self).__init__(attrs)
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value: value=None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
is_hidden = True
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
super(MultipleHiddenInput, self).__init__(attrs)
# choices can be any iterable
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
id_ = final_attrs.get('id', None)
inputs = []
for i, v in enumerate(value):
input_attrs = dict(value=force_unicode(v), **final_attrs)
if id_:
# An ID attribute was given. Add a numeric index as a suffix
# so that the inputs don't all have the same ID attribute.
input_attrs['id'] = '%s_%s' % (id_, i)
inputs.append(u'<input%s />' % flatatt(input_attrs))
return mark_safe(u'\n'.join(inputs))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
class FileInput(Input):
input_type = 'file'
needs_multipart_form = True
def render(self, name, value, attrs=None):
return super(FileInput, self).render(name, None, attrs=attrs)
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
return files.get(name, None)
def _has_changed(self, initial, data):
if data is None:
return False
return True
FILE_INPUT_CONTRADICTION = object()
class ClearableFileInput(FileInput):
initial_text = ugettext_lazy('Currently')
input_text = ugettext_lazy('Change')
clear_checkbox_label = ugettext_lazy('Clear')
template_with_initial = u'%(initial_text)s: %(initial)s %(clear_template)s<br />%(input_text)s: %(input)s'
template_with_clear = u'%(clear)s <label for="%(clear_checkbox_id)s">%(clear_checkbox_label)s</label>'
def clear_checkbox_name(self, name):
"""
Given the name of the file input, return the name of the clear checkbox
input.
"""
return name + '-clear'
def clear_checkbox_id(self, name):
"""
Given the name of the clear checkbox input, return the HTML id for it.
"""
return name + '_id'
def render(self, name, value, attrs=None):
substitutions = {
'initial_text': self.initial_text,
'input_text': self.input_text,
'clear_template': '',
'clear_checkbox_label': self.clear_checkbox_label,
}
template = u'%(input)s'
substitutions['input'] = super(ClearableFileInput, self).render(name, value, attrs)
if value and hasattr(value, "url"):
template = self.template_with_initial
substitutions['initial'] = (u'<a href="%s">%s</a>'
% (escape(value.url),
escape(force_unicode(value))))
if not self.is_required:
checkbox_name = self.clear_checkbox_name(name)
checkbox_id = self.clear_checkbox_id(checkbox_name)
substitutions['clear_checkbox_name'] = conditional_escape(checkbox_name)
substitutions['clear_checkbox_id'] = conditional_escape(checkbox_id)
substitutions['clear'] = CheckboxInput().render(checkbox_name, False, attrs={'id': checkbox_id})
substitutions['clear_template'] = self.template_with_clear % substitutions
return mark_safe(template % substitutions)
def value_from_datadict(self, data, files, name):
upload = super(ClearableFileInput, self).value_from_datadict(data, files, name)
if not self.is_required and CheckboxInput().value_from_datadict(
data, files, self.clear_checkbox_name(name)):
if upload:
# If the user contradicts themselves (uploads a new file AND
# checks the "clear" checkbox), we return a unique marker
# object that FileField will turn into a ValidationError.
return FILE_INPUT_CONTRADICTION
# False signals to clear any existing value, as opposed to just None
return False
return upload
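# --- Illustrative sketch (not part of the original module) ---
# The three possible results of ClearableFileInput.value_from_datadict():
# the upload itself, False (clear the stored file), or the
# FILE_INPUT_CONTRADICTION marker when an upload and the "clear" checkbox
# arrive together. The field name and dictionaries below are hypothetical.
def _clearable_file_input_example():
    widget = ClearableFileInput()
    data = {'avatar-clear': 'on'}   # the "clear" checkbox was ticked
    files = {}                      # ...and no new file was uploaded
    # Returns False here; with files={'avatar': <upload>} it would return
    # FILE_INPUT_CONTRADICTION instead.
    return widget.value_from_datadict(data, files, 'avatar')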
class Textarea(Widget):
def __init__(self, attrs=None):
# The 'rows' and 'cols' attributes are required for HTML correctness.
default_attrs = {'cols': '40', 'rows': '10'}
if attrs:
default_attrs.update(attrs)
super(Textarea, self).__init__(default_attrs)
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
return mark_safe(u'<textarea%s>%s</textarea>' % (flatatt(final_attrs),
conditional_escape(force_unicode(value))))
class DateInput(Input):
input_type = 'text'
def __init__(self, attrs=None, format=None):
super(DateInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATE_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_date(value)
return value.strftime(self.format)
return value
def _has_changed(self, initial, data):
# If our field has show_hidden_initial=True, initial will be a string
# formatted by HiddenInput using formats.localize_input, which is not
# necessarily the format used for this widget. Attempt to convert it.
try:
input_format = formats.get_format('DATE_INPUT_FORMATS')[0]
initial = datetime.datetime.strptime(initial, input_format).date()
except (TypeError, ValueError):
pass
return super(DateInput, self)._has_changed(self._format_value(initial), data)
class DateTimeInput(Input):
input_type = 'text'
def __init__(self, attrs=None, format=None):
super(DateTimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('DATETIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
value = datetime_safe.new_datetime(value)
return value.strftime(self.format)
return value
def _has_changed(self, initial, data):
# If our field has show_hidden_initial=True, initial will be a string
# formatted by HiddenInput using formats.localize_input, which is not
# necessarily the format used for this widget. Attempt to convert it.
try:
input_format = formats.get_format('DATETIME_INPUT_FORMATS')[0]
initial = datetime.datetime.strptime(initial, input_format)
except (TypeError, ValueError):
pass
return super(DateTimeInput, self)._has_changed(self._format_value(initial), data)
class TimeInput(Input):
input_type = 'text'
def __init__(self, attrs=None, format=None):
super(TimeInput, self).__init__(attrs)
if format:
self.format = format
self.manual_format = True
else:
self.format = formats.get_format('TIME_INPUT_FORMATS')[0]
self.manual_format = False
def _format_value(self, value):
if self.is_localized and not self.manual_format:
return formats.localize_input(value)
elif hasattr(value, 'strftime'):
return value.strftime(self.format)
return value
def _has_changed(self, initial, data):
# If our field has show_hidden_initial=True, initial will be a string
# formatted by HiddenInput using formats.localize_input, which is not
# necessarily the format used for this widget. Attempt to convert it.
try:
input_format = formats.get_format('TIME_INPUT_FORMATS')[0]
initial = datetime.datetime.strptime(initial, input_format).time()
except (TypeError, ValueError):
pass
return super(TimeInput, self)._has_changed(self._format_value(initial), data)
# Defined at module level so that CheckboxInput is picklable (#17976)
def boolean_check(v):
return not (v is False or v is None or v == '')
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=None):
super(CheckboxInput, self).__init__(attrs)
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.check_test = boolean_check if check_test is None else check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
try:
result = self.check_test(value)
except: # Silently catch exceptions
result = False
if result:
final_attrs['checked'] = 'checked'
if not (value is True or value is False or value is None or value == ''):
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(value)
return mark_safe(u'<input%s />' % flatatt(final_attrs))
def value_from_datadict(self, data, files, name):
if name not in data:
# A missing value means False because HTML form submission does not
# send results for unselected checkboxes.
return False
value = data.get(name)
# Translate true and false strings to boolean values.
values = {'true': True, 'false': False}
if isinstance(value, basestring):
value = values.get(value.lower(), value)
return value
def _has_changed(self, initial, data):
# Sometimes data or initial could be None or u'' which should be the
# same thing as False.
return bool(initial) != bool(data)
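# --- Illustrative sketch (not part of the original module) ---
# check_test lets callers decide when the box renders as checked without
# subclassing; the predicate and field name below are hypothetical.
def _checkbox_check_test_example():
    cb = CheckboxInput(check_test=lambda value: value == 'yes')
    # Adds checked="checked" because the test returns True for 'yes'.
    return cb.render('newsletter', 'yes')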
class Select(Widget):
allow_multiple_selected = False
def __init__(self, attrs=None, choices=()):
super(Select, self).__init__(attrs)
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def render(self, name, value, attrs=None, choices=()):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<select%s>' % flatatt(final_attrs)]
options = self.render_options(choices, [value])
if options:
output.append(options)
output.append(u'</select>')
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
option_value = force_unicode(option_value)
if option_value in selected_choices:
selected_html = u' selected="selected"'
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
return u'<option value="%s"%s>%s</option>' % (
escape(option_value), selected_html,
conditional_escape(force_unicode(option_label)))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = set(force_unicode(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(u'<optgroup label="%s">' % escape(force_unicode(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append(u'</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return u'\n'.join(output)
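# --- Illustrative sketch (not part of the original module) ---
# render_options() treats a (group_label, [(value, label), ...]) choice as an
# <optgroup>; the choices below are hypothetical.
def _select_optgroup_example():
    widget = Select(choices=[
        ('audio', [('vinyl', 'Vinyl'), ('cd', 'CD')]),
        ('unknown', 'Unknown'),
    ])
    # 'vinyl' is rendered inside <optgroup label="audio"> with
    # selected="selected".
    return widget.render('media', 'vinyl')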
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = ((u'1', ugettext_lazy('Unknown')),
(u'2', ugettext_lazy('Yes')),
(u'3', ugettext_lazy('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: u'2', False: u'3', u'2': u'2', u'3': u'3'}[value]
except KeyError:
value = u'1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
return {u'2': True,
True: True,
'True': True,
u'3': False,
'False': False,
False: False}.get(value, None)
def _has_changed(self, initial, data):
# For a NullBooleanSelect, None (unknown) and False (No)
# are not the same
if initial is not None:
initial = bool(initial)
if data is not None:
data = bool(data)
return initial != data
class SelectMultiple(Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<select multiple="multiple"%s>' % flatatt(final_attrs)]
options = self.render_options(choices, value)
if options:
output.append(options)
output.append('</select>')
return mark_safe(u'\n'.join(output))
def value_from_datadict(self, data, files, name):
if isinstance(data, (MultiValueDict, MergeDict)):
return data.getlist(name)
return data.get(name, None)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set([force_unicode(value) for value in initial])
data_set = set([force_unicode(value) for value in data])
return data_set != initial_set
class RadioInput(SubWidget):
"""
An object used by RadioFieldRenderer that represents a single
<input type='radio'>.
"""
def __init__(self, name, value, attrs, choice, index):
self.name, self.value = name, value
self.attrs = attrs
self.choice_value = force_unicode(choice[0])
self.choice_label = force_unicode(choice[1])
self.index = index
def __unicode__(self):
return self.render()
def render(self, name=None, value=None, attrs=None, choices=()):
name = name or self.name
value = value or self.value
attrs = attrs or self.attrs
if 'id' in self.attrs:
label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
else:
label_for = ''
choice_label = conditional_escape(force_unicode(self.choice_label))
return mark_safe(u'<label%s>%s %s</label>' % (label_for, self.tag(), choice_label))
def is_checked(self):
return self.value == self.choice_value
def tag(self):
if 'id' in self.attrs:
self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
final_attrs = dict(self.attrs, type='radio', name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return mark_safe(u'<input%s />' % flatatt(final_attrs))
class RadioFieldRenderer(StrAndUnicode):
"""
An object used by RadioSelect to enable customization of radio widgets.
"""
def __init__(self, name, value, attrs, choices):
self.name, self.value, self.attrs = name, value, attrs
self.choices = choices
def __iter__(self):
for i, choice in enumerate(self.choices):
yield RadioInput(self.name, self.value, self.attrs.copy(), choice, i)
def __getitem__(self, idx):
        choice = self.choices[idx] # Let the IndexError propagate
return RadioInput(self.name, self.value, self.attrs.copy(), choice, idx)
def __unicode__(self):
return self.render()
def render(self):
"""Outputs a <ul> for this set of radio fields."""
return mark_safe(u'<ul>\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>'
% force_unicode(w) for w in self]))
class RadioSelect(Select):
renderer = RadioFieldRenderer
def __init__(self, *args, **kwargs):
# Override the default renderer if we were passed one.
renderer = kwargs.pop('renderer', None)
if renderer:
self.renderer = renderer
super(RadioSelect, self).__init__(*args, **kwargs)
def subwidgets(self, name, value, attrs=None, choices=()):
for widget in self.get_renderer(name, value, attrs, choices):
yield widget
def get_renderer(self, name, value, attrs=None, choices=()):
"""Returns an instance of the renderer."""
if value is None: value = ''
str_value = force_unicode(value) # Normalize to string.
final_attrs = self.build_attrs(attrs)
choices = list(chain(self.choices, choices))
return self.renderer(name, str_value, final_attrs, choices)
def render(self, name, value, attrs=None, choices=()):
return self.get_renderer(name, value, attrs, choices).render()
def id_for_label(self, id_):
# RadioSelect is represented by multiple <input type="radio"> fields,
# each of which has a distinct ID. The IDs are made distinct by a "_X"
# suffix, where X is the zero-based index of the radio field. Thus,
# the label for a RadioSelect should reference the first one ('_0').
if id_:
id_ += '_0'
return id_
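# --- Illustrative sketch (not part of the original module) ---
# Passing renderer= replaces the default <ul> output; the renderer and
# choices below are hypothetical.
def _radio_select_renderer_example():
    class DivRenderer(RadioFieldRenderer):
        def render(self):
            return mark_safe(u'<div>%s</div>' %
                             u'\n'.join([force_unicode(w) for w in self]))
    widget = RadioSelect(renderer=DivRenderer,
                         choices=[('1', 'One'), ('2', 'Two')])
    return widget.render('number', '1')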
class CheckboxSelectMultiple(SelectMultiple):
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<ul>']
# Normalize to strings
str_values = set([force_unicode(v) for v in value])
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = u' for="%s"' % final_attrs['id']
else:
label_for = ''
cb = CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_unicode(option_value)
rendered_cb = cb.render(name, option_value)
option_label = conditional_escape(force_unicode(option_label))
output.append(u'<li><label%s>%s %s</label></li>' % (label_for, rendered_cb, option_label))
output.append(u'</ul>')
return mark_safe(u'\n'.join(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
    Its render() method is different from other widgets', because it has to
figure out how to split a single value for display in multiple widgets.
The ``value`` argument can be one of two things:
* A list.
* A normal value (e.g., a string) that has been "compressed" from
a list of values.
In the second case -- i.e., if the value is NOT a list -- render() will
first "decompress" the value into a list before rendering it. It does so by
calling the decompress() method, which MultiWidget subclasses must
implement. This method takes a single "compressed" value and returns a
list.
When render() does its HTML rendering, each value in the list is rendered
with the corresponding widget -- the first value is rendered in the first
widget, the second value is rendered in the second widget, etc.
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns a string of HTML that formats them any way you'd like.
You'll probably want to use this class with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [isinstance(w, type) and w() or w for w in widgets]
super(MultiWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return mark_safe(self.format_output(output))
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def value_from_datadict(self, data, files, name):
return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def _has_changed(self, initial, data):
if initial is None:
initial = [u'' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.decompress(initial)
for widget, initial, data in zip(self.widgets, initial, data):
if widget._has_changed(initial, data):
return True
return False
def format_output(self, rendered_widgets):
"""
Given a list of rendered widgets (as strings), returns a Unicode string
representing the HTML for the whole lot.
This hook allows you to format the HTML design of the widgets, if
needed.
"""
return u''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
for w in self.widgets:
media = media + w.media
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(MultiWidget, self).__deepcopy__(memo)
obj.widgets = copy.deepcopy(self.widgets)
return obj
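# --- Illustrative sketch (not part of the original module) ---
# A minimal MultiWidget subclass: decompress() is the one method subclasses
# must implement, splitting the "compressed" value across the sub-widgets.
# The phone-number split below is hypothetical.
def _multiwidget_example():
    class PhoneWidget(MultiWidget):
        def __init__(self, attrs=None):
            widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs))
            super(PhoneWidget, self).__init__(widgets, attrs)
        def decompress(self, value):
            if value:
                # '555-0100' -> ['555', '0100'], one part per sub-widget
                return value.split('-', 1)
            return [None, None]
    # Renders two <input type="text"> boxes named phone_0 and phone_1.
    return PhoneWidget().render('phone', u'555-0100')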
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
widgets = (DateInput(attrs=attrs, format=date_format),
TimeInput(attrs=attrs, format=time_format))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
value = to_current_timezone(value)
return [value.date(), value.time().replace(microsecond=0)]
return [None, None]
class SplitHiddenDateTimeWidget(SplitDateTimeWidget):
"""
A Widget that splits datetime input into two <input type="hidden"> inputs.
"""
is_hidden = True
def __init__(self, attrs=None, date_format=None, time_format=None):
super(SplitHiddenDateTimeWidget, self).__init__(attrs, date_format, time_format)
for widget in self.widgets:
widget.input_type = 'hidden'
widget.is_hidden = True
|
etherkit/OpenBeacon2 | refs/heads/master | macos/venv/lib/python3.8/site-packages/PyInstaller/hooks/hook-PyQt5.QtWebEngineWidgets.py | 3 | #-----------------------------------------------------------------------------
# Copyright (c) 2014-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks.qt import add_qt5_dependencies, pyqt5_library_info
from PyInstaller.utils.hooks import remove_prefix, get_module_file_attribute, \
collect_system_data_files
from PyInstaller.depend.bindepend import getImports
import PyInstaller.compat as compat
# Ensure PyQt5 is importable before adding info depending on it.
if pyqt5_library_info.version:
hiddenimports, binaries, datas = add_qt5_dependencies(__file__)
# Include the web engine process, translations, and resources.
rel_data_path = ['PyQt5', 'Qt']
if compat.is_darwin:
# This is based on the layout of the Mac wheel from PyPi.
data_path = pyqt5_library_info.location['DataPath']
libraries = ['QtCore', 'QtWebEngineCore', 'QtQuick', 'QtQml',
'QtQmlModels', 'QtNetwork', 'QtGui', 'QtWebChannel',
'QtPositioning']
for i in libraries:
datas += collect_system_data_files(
os.path.join(data_path, 'lib', i + '.framework'),
os.path.join(*(rel_data_path + ['lib'])), True)
datas += [(os.path.join(data_path, 'lib', 'QtWebEngineCore.framework',
'Resources'), os.curdir)]
else:
locales = 'qtwebengine_locales'
resources = 'resources'
datas += [
# Gather translations needed by Chromium.
(os.path.join(pyqt5_library_info.location['TranslationsPath'],
locales),
os.path.join('PyQt5', 'Qt', 'translations', locales)),
# Per the `docs <https://doc.qt.io/qt-5.10/qtwebengine-deploying.html#deploying-resources>`_,
# ``DataPath`` is the base directory for ``resources``.
#
            # When Python 3.4 goes EOL (see `PEP 448`_), this is better written as
# ``os.path.join(*rel_data_path, resources)``.
(os.path.join(pyqt5_library_info.location['DataPath'], resources),
os.path.join(*(rel_data_path + [resources]))),
# Include the webengine process. The ``LibraryExecutablesPath`` is only
# valid on Windows and Linux.
#
# Again, rewrite when Python 3.4 is EOL to
# ``os.path.join(*rel_data_path, remove_prefix(...``.
(os.path.join(pyqt5_library_info.location['LibraryExecutablesPath'],
'QtWebEngineProcess*'),
os.path.join(*(rel_data_path +
[remove_prefix(pyqt5_library_info.location['LibraryExecutablesPath'],
pyqt5_library_info.location['PrefixPath'] + '/')])))
]
# Add Linux-specific libraries.
if compat.is_linux:
# The automatic library detection fails for `NSS
# <https://packages.ubuntu.com/search?keywords=libnss3>`_, which is used by
# QtWebEngine. In some distributions, the ``libnss`` supporting libraries
# are stored in a subdirectory ``nss``. Since ``libnss`` is not statically
# linked to these, but dynamically loads them, we need to search for and add
# them.
#
# First, get all libraries linked to ``PyQt5.QtWebEngineWidgets``.
for imp in getImports(get_module_file_attribute('PyQt5.QtWebEngineWidgets')):
# Look for ``libnss3.so``.
if os.path.basename(imp).startswith('libnss3.so'):
# Find the location of NSS: given a ``/path/to/libnss.so``,
# add ``/path/to/nss/*.so`` to get the missing NSS libraries.
nss_subdir = os.path.join(os.path.dirname(imp), 'nss')
if os.path.exists(nss_subdir):
binaries.append((os.path.join(nss_subdir, '*.so'), 'nss'))
|
ArseniyK/Sunflower | refs/heads/master | application/main.py | 2 | import os
import sys
try:
# try to import GTK
import pygtk
pygtk.require20()
import gtk
except:
# print error and die
print "Error starting Sunflower, missing GTK 2.0+"
sys.exit(1)
try:
from setproctitle import setproctitle
setproctitle('sunflower')
except ImportError:
pass
# add search path
application_path = os.path.abspath(os.path.dirname(sys.argv[0]))
if application_path not in sys.path:
sys.path.insert(1, application_path)
# initialize threads
gtk.gdk.threads_init()
with gtk.gdk.lock:
# construct main application object
from gui.main_window import MainWindow
app = MainWindow()
app.run()
|
pquentin/django | refs/heads/stable/1.8.x | tests/auth_tests/test_forms.py | 12 | from __future__ import unicode_literals
import re
from django import forms
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget, SetPasswordForm,
UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field
from django.test import TestCase, override_settings
from django.utils import translation
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .settings import AUTH_TEMPLATES
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserCreationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_user_already_exists(self):
data = {
'username': 'testclient',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["username"].errors,
[force_text(User._meta.get_field('username').error_messages['unique'])])
def test_invalid_data(self):
data = {
'username': 'jsmith!',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_password_verification(self):
# The verification password is incorrect.
data = {
'username': 'jsmith',
'password1': 'test123',
'password2': 'test',
}
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form["password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_both_passwords(self):
# One (or both) passwords weren't given
data = {'username': 'jsmith'}
form = UserCreationForm(data)
required_error = [force_text(Field.default_error_messages['required'])]
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, required_error)
data['password2'] = 'test123'
form = UserCreationForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['password1'].errors, required_error)
self.assertEqual(form['password2'].errors, [])
def test_success(self):
# The success case.
data = {
'username': 'jsmith@example.com',
'password1': 'test123',
'password2': 'test123',
}
form = UserCreationForm(data)
self.assertTrue(form.is_valid())
u = form.save()
self.assertEqual(repr(u), '<User: jsmith@example.com>')
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AuthenticationFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_invalid_username(self):
# The user submits an invalid username.
data = {
'username': 'jsmith_does_not_exist',
'password': 'test123',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})])
def test_inactive_user(self):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_inactive_user_i18n(self):
with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
# The user is inactive.
data = {
'username': 'inactive',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(),
[force_text(form.error_messages['inactive'])])
def test_custom_login_allowed_policy(self):
# The user is inactive, but our custom form policy allows them to log in.
data = {
'username': 'inactive',
'password': 'password',
}
class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
def confirm_login_allowed(self, user):
pass
form = AuthenticationFormWithInactiveUsersOkay(None, data)
self.assertTrue(form.is_valid())
# If we want to disallow some logins according to custom logic,
# we should raise a django.forms.ValidationError in the form.
class PickyAuthenticationForm(AuthenticationForm):
def confirm_login_allowed(self, user):
if user.username == "inactive":
raise forms.ValidationError("This user is disallowed.")
raise forms.ValidationError("Sorry, nobody's allowed in.")
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
data = {
'username': 'testclient',
'password': 'password',
}
form = PickyAuthenticationForm(None, data)
self.assertFalse(form.is_valid())
self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])
def test_success(self):
# The success case
data = {
'username': 'testclient',
'password': 'password',
}
form = AuthenticationForm(None, data)
self.assertTrue(form.is_valid())
self.assertEqual(form.non_field_errors(), [])
def test_username_field_label(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label="Name", max_length=75)
form = CustomAuthenticationForm()
self.assertEqual(form['username'].label, "Name")
def test_username_field_label_not_set(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField()
form = CustomAuthenticationForm()
username_field = User._meta.get_field(User.USERNAME_FIELD)
self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))
def test_username_field_label_empty_string(self):
class CustomAuthenticationForm(AuthenticationForm):
username = CharField(label='')
form = CustomAuthenticationForm()
self.assertEqual(form.fields['username'].label, "")
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class SetPasswordFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = SetPasswordForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
user = User.objects.get(username='testclient')
data = {
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = SetPasswordForm(user, data)
self.assertTrue(form.is_valid())
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class PasswordChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_incorrect_password(self):
user = User.objects.get(username='testclient')
data = {
'old_password': 'test',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["old_password"].errors,
[force_text(form.error_messages['password_incorrect'])])
def test_password_verification(self):
# The two new passwords do not match.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc',
}
form = PasswordChangeForm(user, data)
self.assertFalse(form.is_valid())
self.assertEqual(form["new_password2"].errors,
[force_text(form.error_messages['password_mismatch'])])
def test_success(self):
# The success case.
user = User.objects.get(username='testclient')
data = {
'old_password': 'password',
'new_password1': 'abc123',
'new_password2': 'abc123',
}
form = PasswordChangeForm(user, data)
self.assertTrue(form.is_valid())
def test_field_order(self):
# Regression test - check the order of fields:
user = User.objects.get(username='testclient')
self.assertEqual(list(PasswordChangeForm(user, {}).fields),
['old_password', 'new_password1', 'new_password2'])
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserChangeFormTest(TestCase):
fixtures = ['authtestdata.json']
def test_username_validity(self):
user = User.objects.get(username='testclient')
data = {'username': 'not valid'}
form = UserChangeForm(data, instance=user)
self.assertFalse(form.is_valid())
validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
self.assertEqual(form["username"].errors, [force_text(validator.message)])
def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
class MyUserForm(UserChangeForm):
def __init__(self, *args, **kwargs):
super(MyUserForm, self).__init__(*args, **kwargs)
self.fields['groups'].help_text = 'These groups give users different permissions'
class Meta(UserChangeForm.Meta):
fields = ('groups',)
# Just check we can create it
MyUserForm({})
    def test_unusable_password(self):
user = User.objects.get(username='empty_password')
user.set_unusable_password()
user.save()
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_empty_password(self):
user = User.objects.get(username='empty_password')
form = UserChangeForm(instance=user)
self.assertIn(_("No password set."), form.as_table())
def test_bug_17944_unmanageable_password(self):
user = User.objects.get(username='unmanageable_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_17944_unknown_password_algorithm(self):
user = User.objects.get(username='unknown_password')
form = UserChangeForm(instance=user)
self.assertIn(_("Invalid password format or unknown hashing algorithm."),
form.as_table())
def test_bug_19133(self):
"The change form does not return the password value"
# Use the form to construct the POST data
user = User.objects.get(username='testclient')
form_for_data = UserChangeForm(instance=user)
post_data = form_for_data.initial
# The password field should be readonly, so anything
# posted here should be ignored; the form will be
# valid, and give back the 'initial' value for the
# password field.
post_data['password'] = 'new password'
form = UserChangeForm(instance=user, data=post_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')
def test_bug_19349_bound_password_field(self):
user = User.objects.get(username='testclient')
form = UserChangeForm(data={}, instance=user)
# When rendering the bound password field,
# ReadOnlyPasswordHashWidget needs the initial
# value to render correctly
self.assertEqual(form.initial['password'], form['password'].value())
@override_settings(
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
)
class PasswordResetFormTest(TestCase):
fixtures = ['authtestdata.json']
@classmethod
def setUpClass(cls):
super(PasswordResetFormTest, cls).setUpClass()
# This cleanup is necessary because contrib.sites cache
# makes tests interfere with each other, see #11505
Site.objects.clear_cache()
def create_dummy_user(self):
"""
Create a user and return a tuple (user_object, username, email).
"""
username = 'jsmith'
email = 'jsmith@example.com'
user = User.objects.create_user(username, email, 'test123')
return (user, username, email)
def test_invalid_email(self):
data = {'email': 'not valid'}
form = PasswordResetForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])
def test_nonexistent_email(self):
"""
Test nonexistent email address. This should not fail because it would
expose information about registered users.
"""
data = {'email': 'foo@bar.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(len(mail.outbox), 0)
def test_cleaned_data(self):
(user, username, email) = self.create_dummy_user()
data = {'email': email}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
form.save(domain_override='example.com')
self.assertEqual(form.cleaned_data['email'], email)
self.assertEqual(len(mail.outbox), 1)
def test_custom_email_subject(self):
data = {'email': 'testclient@example.com'}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')
def test_custom_email_constructor(self):
data = {'email': 'testclient@example.com'}
class CustomEmailPasswordResetForm(PasswordResetForm):
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email,
html_email_template_name=None):
EmailMultiAlternatives(
"Forgot your password?",
"Sorry to hear you forgot your password.",
None, [to_email],
['site_monitor@example.com'],
headers={'Reply-To': 'webmaster@example.com'},
alternatives=[("Really sorry to hear you forgot your password.",
"text/html")]).send()
form = CustomEmailPasswordResetForm(data)
self.assertTrue(form.is_valid())
# Since we're not providing a request object, we must provide a
# domain_override to prevent the save operation from failing in the
# potential case where contrib.sites is not installed. Refs #16412.
form.save(domain_override='example.com')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
self.assertEqual(mail.outbox[0].content_subtype, "plain")
def test_preserve_username_case(self):
"""
Preserve the case of the user name (before the @ in the email address)
when creating a user (#5605).
"""
user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
self.assertEqual(user.email, 'tesT@example.com')
user = User.objects.create_user('forms_test3', 'tesT', 'test')
self.assertEqual(user.email, 'tesT')
def test_inactive_user(self):
"""
        Test that an inactive user cannot receive a password reset email.
"""
(user, username, email) = self.create_dummy_user()
user.is_active = False
user.save()
form = PasswordResetForm({'email': email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_unusable_password(self):
user = User.objects.create_user('testuser', 'test@example.com', 'test')
data = {"email": "test@example.com"}
form = PasswordResetForm(data)
self.assertTrue(form.is_valid())
user.set_unusable_password()
user.save()
form = PasswordResetForm(data)
# The form itself is valid, but no email is sent
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 0)
def test_save_plaintext_email(self):
"""
Test the PasswordResetForm.save() method with no html_email_template_name
parameter passed in.
Test to ensure original behavior is unchanged after the parameter was added.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_content_type(), 'text/plain')
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(mail.outbox[0].alternatives), 0)
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))
def test_save_html_email_template_name(self):
"""
        Test the PasswordResetForm.save() method with html_email_template_name
parameter specified.
Test to ensure that a multipart email is sent with both text/plain
and text/html parts.
"""
(user, username, email) = self.create_dummy_user()
form = PasswordResetForm({"email": email})
self.assertTrue(form.is_valid())
form.save(html_email_template_name='registration/html_password_reset_email.html')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(len(mail.outbox[0].alternatives), 1)
message = mail.outbox[0].message()
self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertEqual(message.get_all('to'), [email])
self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
self.assertTrue(
re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
message.get_payload(1).get_payload())
)
class ReadOnlyPasswordHashTest(TestCase):
def test_bug_19349_render_with_none_value(self):
# Rendering the widget with value set to None
# mustn't raise an exception.
widget = ReadOnlyPasswordHashWidget()
html = widget.render(name='password', value=None, attrs={})
self.assertIn(_("No password set."), html)
def test_readonly_field_has_changed(self):
field = ReadOnlyPasswordHashField()
self.assertFalse(field.has_changed('aaa', 'bbb'))
|
KMK-ONLINE/ansible | refs/heads/devel | lib/ansible/galaxy/token.py | 68 | #!/usr/bin/env python
########################################################################
#
# (C) 2015, Chris Houseknecht <chouse@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import yaml
from stat import *
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyToken(object):
    ''' Class for storing and retrieving the token in ~/.ansible_galaxy '''
def __init__(self):
self.file = os.path.expanduser("~") + '/.ansible_galaxy'
self.config = yaml.safe_load(self.__open_config_for_read())
if not self.config:
self.config = {}
def __open_config_for_read(self):
if os.path.isfile(self.file):
display.vvv('Opened %s' % self.file)
return open(self.file, 'r')
        # config.yml not found, create it and chmod u+rw
f = open(self.file,'w')
f.close()
os.chmod(self.file,S_IRUSR|S_IWUSR) # owner has +rw
display.vvv('Created %s' % self.file)
return open(self.file, 'r')
def set(self, token):
self.config['token'] = token
self.save()
def get(self):
return self.config.get('token', None)
def save(self):
with open(self.file,'w') as f:
yaml.safe_dump(self.config,f,default_flow_style=False)
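# Illustrative usage sketch (not part of the original module): the store
# creates ~/.ansible_galaxy with owner-only permissions on first read and
# round-trips the token through YAML. The token value below is hypothetical.
def _galaxy_token_example():
    token_store = GalaxyToken()
    token_store.set('abc123')      # persists {'token': 'abc123'}
    return token_store.get()       # -> 'abc123'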
|
nhomar/odoo | refs/heads/8.0 | openerp/osv/expression.py | 42 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Domain expression processing
The main duty of this module is to compile a domain expression into a
SQL query. A lot of things should be documented here, but as a first
step in the right direction, some tests in test_osv_expression.yml
might give you some additional information.
For legacy reasons, a domain uses an inconsistent two-levels abstract
syntax (domains are regular Python data structures). At the first
level, a domain is an expression made of terms (sometimes called
leaves) and (domain) operators used in prefix notation. The available
operators at this level are '!', '&', and '|'. '!' is a unary 'not',
'&' is a binary 'and', and '|' is a binary 'or'. For instance, here
is a possible domain. (<term> stands for an arbitrary term, more on
this later.)::
['&', '!', <term1>, '|', <term2>, <term3>]
It is equivalent to this pseudo code using infix notation::
(not <term1>) and (<term2> or <term3>)
The second level of syntax deals with the term representation. A term
is a triple of the form (left, operator, right). That is, a term uses
an infix notation, and the available operators, and possible left and
right operands differ with those of the previous level. Here is a
possible term::
('company_id.name', '=', 'OpenERP')
The left and right operand don't have the same possible values. The
left operand is field name (related to the model for which the domain
applies). Actually, the field name can use the dot-notation to
traverse relationships. The right operand is a Python value whose
type should match the used operator and field type. In the above
example, a string is used because the name field of a company has type
string, and because we use the '=' operator. When appropriate, a 'in'
operator can be used, and thus the right operand should be a list.
Note: the non-uniform syntax could have been more uniform, but this
would hide an important limitation of the domain syntax. Say that the
term representation was ['=', 'company_id.name', 'OpenERP']. Used in a
complete domain, this would look like::
['!', ['=', 'company_id.name', 'OpenERP']]
and you would be tempted to believe something like this would be
possible::
['!', ['=', 'company_id.name', ['&', ..., ...]]]
That is, a domain could be a valid operand. But this is not the
case. A domain is really limited to a two-level nature, and can not
take a recursive form: a domain is not a valid second-level operand.
Unaccent - Accent-insensitive search
OpenERP will use the SQL function 'unaccent', when it is available and
enabled in the configuration, for the 'ilike' and 'not ilike' operators.
Normally the 'unaccent' function is obtained from `the PostgreSQL
'unaccent' contrib module
<http://developer.postgresql.org/pgdocs/postgres/unaccent.html>`_.
.. todo: The following explanation should be moved in some external
installation guide
The steps to install the module might differ on specific PostgreSQL
versions. We give here some instruction for PostgreSQL 9.x on a
Ubuntu system.
Ubuntu doesn't yet come with PostgreSQL 9.x, so an alternative package
source is used. We use Martin Pitt's PPA available at
`ppa:pitti/postgresql
<https://launchpad.net/~pitti/+archive/postgresql>`_.
.. code-block:: sh
> sudo add-apt-repository ppa:pitti/postgresql
> sudo apt-get update
Once the package list is up-to-date, you have to install PostgreSQL
9.0 and its contrib modules.
.. code-block:: sh
> sudo apt-get install postgresql-9.0 postgresql-contrib-9.0
When you want to enable unaccent on some database:
.. code-block:: sh
> psql9 <database> -f /usr/share/postgresql/9.0/contrib/unaccent.sql
Here :program:`psql9` is an alias for the newly installed PostgreSQL
9.0 tool, together with the correct port if necessary (for instance if
PostgreSQL 8.4 is running on 5432). (Other aliases can be used for
createdb and dropdb.)
.. code-block:: sh
> alias psql9='/usr/lib/postgresql/9.0/bin/psql -p 5433'
You can check unaccent is working:
.. code-block:: sh
> psql9 <database> -c"select unaccent('hélène')"
Finally, to instruct OpenERP to really use the unaccent function, you have to
start the server specifying the ``--unaccent`` flag.
"""
import collections
import logging
import traceback
import openerp.modules
from . import fields
from ..models import MAGIC_COLUMNS, BaseModel
import openerp.tools as tools
# Domain operators.
NOT_OPERATOR = '!'
OR_OPERATOR = '|'
AND_OPERATOR = '&'
DOMAIN_OPERATORS = (NOT_OPERATOR, OR_OPERATOR, AND_OPERATOR)
# List of available term operators. It is also possible to use the '<>'
# operator, which is strictly the same as '!='; the latter should be preferred
# for consistency. This list doesn't contain '<>' as it is simplified to '!='
# by the normalize_operator() function (so later part of the code deals with
# only one representation).
# Internals (i.e. not available to the user) 'inselect' and 'not inselect'
# operators are also used. In this case its right operand has the form (subselect, params).
TERM_OPERATORS = ('=', '!=', '<=', '<', '>', '>=', '=?', '=like', '=ilike',
'like', 'not like', 'ilike', 'not ilike', 'in', 'not in',
'child_of')
# A subset of the above operators, with a 'negative' semantic. When the
# expressions 'in NEGATIVE_TERM_OPERATORS' or 'not in NEGATIVE_TERM_OPERATORS' are used in the code
# below, this doesn't necessarily mean that any of those NEGATIVE_TERM_OPERATORS is
# legal in the processed term.
NEGATIVE_TERM_OPERATORS = ('!=', 'not like', 'not ilike', 'not in')
TRUE_LEAF = (1, '=', 1)
FALSE_LEAF = (0, '=', 1)
TRUE_DOMAIN = [TRUE_LEAF]
FALSE_DOMAIN = [FALSE_LEAF]
_logger = logging.getLogger(__name__)
# --------------------------------------------------
# Generic domain manipulation
# --------------------------------------------------
def normalize_domain(domain):
"""Returns a normalized version of ``domain_expr``, where all implicit '&' operators
have been made explicit. One property of normalized domain expressions is that they
can be easily combined together as if they were single domain components.
"""
assert isinstance(domain, (list, tuple)), "Domains to normalize must have a 'domain' form: a list or tuple of domain components"
if not domain:
return TRUE_DOMAIN
result = []
expected = 1 # expected number of expressions
op_arity = {NOT_OPERATOR: 1, AND_OPERATOR: 2, OR_OPERATOR: 2}
for token in domain:
if expected == 0: # more than expected, like in [A, B]
result[0:0] = [AND_OPERATOR] # put an extra '&' in front
expected = 1
result.append(token)
if isinstance(token, (list, tuple)): # domain term
expected -= 1
else:
expected += op_arity.get(token, 0) - 1
assert expected == 0, 'This domain is syntactically not correct: %s' % (domain)
return result
def combine(operator, unit, zero, domains):
"""Returns a new domain expression where all domain components from ``domains``
have been added together using the binary operator ``operator``. The given
domains must be normalized.
:param unit: the identity element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``i`` which, when
combined with any domain ``x`` via ``operator``, yields ``x``.
E.g. [(1,'=',1)] is the typical unit for AND_OPERATOR: adding it
to any domain component gives the same domain.
:param zero: the absorbing element of the domains "set" with regard to the operation
performed by ``operator``, i.e the domain component ``z`` which, when
combined with any domain ``x`` via ``operator``, yields ``z``.
E.g. [(1,'=',1)] is the typical zero for OR_OPERATOR: as soon as
you see it in a domain component the resulting domain is the zero.
:param domains: a list of normalized domains.
"""
result = []
count = 0
for domain in domains:
if domain == unit:
continue
if domain == zero:
return zero
if domain:
result += domain
count += 1
result = [operator] * (count - 1) + result
return result
def AND(domains):
"""AND([D1,D2,...]) returns a domain representing D1 and D2 and ... """
return combine(AND_OPERATOR, TRUE_DOMAIN, FALSE_DOMAIN, domains)
def OR(domains):
"""OR([D1,D2,...]) returns a domain representing D1 or D2 or ... """
return combine(OR_OPERATOR, FALSE_DOMAIN, TRUE_DOMAIN, domains)
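# --- Illustrative sketch (not part of the original module) ---
# Combining normalized domains: OR prefixes one '|' per extra operand, AND
# one '&'. The field names below are hypothetical.
def _combine_example():
    d1 = normalize_domain([('user_id', '=', 4)])
    d2 = normalize_domain([('state', '=', 'draft')])
    # -> ['|', ('user_id', '=', 4), ('state', '=', 'draft')]
    return OR([d1, d2])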
def distribute_not(domain):
""" Distribute any '!' domain operators found inside a normalized domain.
Because we don't use SQL semantic for processing a 'left not in right'
query (i.e. our 'not in' is not simply translated to a SQL 'not in'),
it means that a '! left in right' can not be simply processed
by __leaf_to_sql by first emitting code for 'left in right' then wrapping
the result with 'not (...)', as it would result in a 'not in' at the SQL
level.
This function is thus responsible for pushing any '!' domain operators
inside the terms themselves. For example::
['!','&',('user_id','=',4),('partner_id','in',[1,2])]
will be turned into:
['|',('user_id','!=',4),('partner_id','not in',[1,2])]
"""
def negate(leaf):
"""Negates and returns a single domain leaf term,
using the opposite operator if possible"""
left, operator, right = leaf
mapping = {
'<': '>=',
'>': '<=',
'<=': '>',
'>=': '<',
'=': '!=',
'!=': '=',
}
if operator in ('in', 'like', 'ilike'):
operator = 'not ' + operator
return [(left, operator, right)]
if operator in ('not in', 'not like', 'not ilike'):
operator = operator[4:]
return [(left, operator, right)]
if operator in mapping:
operator = mapping[operator]
return [(left, operator, right)]
return [NOT_OPERATOR, (left, operator, right)]
def distribute_negate(domain):
"""Negate the domain ``subtree`` rooted at domain[0],
leaving the rest of the domain intact, and return
(negated_subtree, untouched_domain_rest)
"""
if is_leaf(domain[0]):
return negate(domain[0]), domain[1:]
if domain[0] == AND_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [OR_OPERATOR] + done1 + done2, todo2
if domain[0] == OR_OPERATOR:
done1, todo1 = distribute_negate(domain[1:])
done2, todo2 = distribute_negate(todo1)
return [AND_OPERATOR] + done1 + done2, todo2
if not domain:
return []
if domain[0] != NOT_OPERATOR:
return [domain[0]] + distribute_not(domain[1:])
if domain[0] == NOT_OPERATOR:
done, todo = distribute_negate(domain[1:])
return done + distribute_not(todo)
# --------------------------------------------------
# Generic leaf manipulation
# --------------------------------------------------
def _quote(to_quote):
if '"' not in to_quote:
return '"%s"' % to_quote
return to_quote
def generate_table_alias(src_table_alias, joined_tables=[]):
""" Generate a standard table alias name. An alias is generated as following:
- the base is the source table name (that can already be an alias)
- then, each joined table is added in the alias using a 'link field name'
that is used to render unique aliases for a given path
- returns a tuple composed of the alias, and the full table alias to be
added in a from condition with quoting done
Examples:
        - src_table_alias='res_users', joined_tables=[]:
            alias = ('res_users','"res_users"')
        - src_table_alias='res_users', joined_tables=[('res_partner', 'parent_id')]
            alias = ('res_users__parent_id', '"res_partner" as "res_users__parent_id"')
        :param string src_table_alias: name of the source table (which can
            already be an alias)
        :param list joined_tables: list of tuples
                                   (dst_table_name, link_field)
:return tuple: (table_alias, alias statement for from clause with quotes added)
"""
alias = src_table_alias
if not joined_tables:
return '%s' % alias, '%s' % _quote(alias)
for link in joined_tables:
alias += '__' + link[1]
assert len(alias) < 64, 'Table alias name %s is longer than the 64 characters size accepted by default in postgresql.' % alias
return '%s' % alias, '%s as %s' % (_quote(joined_tables[-1][0]), _quote(alias))
def get_alias_from_query(from_query):
""" :param string from_query: is something like :
- '"res_partner"' OR
- '"res_partner" as "res_users__partner_id"''
"""
from_splitted = from_query.split(' as ')
if len(from_splitted) > 1:
return from_splitted[0].replace('"', ''), from_splitted[1].replace('"', '')
else:
return from_splitted[0].replace('"', ''), from_splitted[0].replace('"', '')
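# Illustrative example (added for clarity):
#   get_alias_from_query('"res_partner" as "res_users__partner_id"')
#   -> ('res_partner', 'res_users__partner_id')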
def normalize_leaf(element):
""" Change a term's operator to some canonical form, simplifying later
processing. """
if not is_leaf(element):
return element
left, operator, right = element
original = operator
operator = operator.lower()
if operator == '<>':
operator = '!='
if isinstance(right, bool) and operator in ('in', 'not in'):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % ((left, original, right),))
operator = '=' if operator == 'in' else '!='
if isinstance(right, (list, tuple)) and operator in ('=', '!='):
_logger.warning("The domain term '%s' should use the 'in' or 'not in' operator." % ((left, original, right),))
operator = 'in' if operator == '=' else 'not in'
return left, operator, right
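# Illustrative examples (added for clarity) of the canonicalization above:
#   normalize_leaf(('foo', '<>', 'bar'))   -> ('foo', '!=', 'bar')
#   normalize_leaf(('active', 'in', True)) -> ('active', '=', True), with a
#   warning logged about the ill-formed term.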
def is_operator(element):
""" Test whether an object is a valid domain operator. """
return isinstance(element, basestring) and element in DOMAIN_OPERATORS
def is_leaf(element, internal=False):
""" Test whether an object is a valid domain term:
- is a list or tuple
- with 3 elements
        - second element is a valid operator
:param tuple element: a leaf in form (left, operator, right)
:param boolean internal: allow or not the 'inselect' internal operator
in the term. This should be always left to False.
Note: OLD TODO change the share wizard to use this function.
"""
INTERNAL_OPS = TERM_OPERATORS + ('<>',)
if internal:
INTERNAL_OPS += ('inselect', 'not inselect')
return (isinstance(element, tuple) or isinstance(element, list)) \
and len(element) == 3 \
and element[1] in INTERNAL_OPS \
and ((isinstance(element[0], basestring) and element[0])
or element in (TRUE_LEAF, FALSE_LEAF))
# --------------------------------------------------
# SQL utils
# --------------------------------------------------
def select_from_where(cr, select_field, from_table, where_field, where_ids, where_operator):
# todo: merge into parent query as sub-query
res = []
if where_ids:
if where_operator in ['<', '>', '>=', '<=']:
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" %s %%s' % \
(select_field, from_table, where_field, where_operator),
(where_ids[0],)) # TODO shouldn't this be min/max(where_ids) ?
res = [r[0] for r in cr.fetchall()]
else: # TODO where_operator is supposed to be 'in'? It is called with child_of...
for i in range(0, len(where_ids), cr.IN_MAX):
subids = where_ids[i:i + cr.IN_MAX]
cr.execute('SELECT "%s" FROM "%s" WHERE "%s" IN %%s' % \
(select_field, from_table, where_field), (tuple(subids),))
res.extend([r[0] for r in cr.fetchall()])
return res
def select_distinct_from_where_not_null(cr, select_field, from_table):
cr.execute('SELECT distinct("%s") FROM "%s" where "%s" is not null' % (select_field, from_table, select_field))
return [r[0] for r in cr.fetchall()]
def get_unaccent_wrapper(cr):
if openerp.modules.registry.RegistryManager.get(cr.dbname).has_unaccent:
return lambda x: "unaccent(%s)" % (x,)
return lambda x: x
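# Illustrative example (added for clarity): when the registry reports unaccent
# support, the returned wrapper decorates a SQL fragment, e.g.
#   get_unaccent_wrapper(cr)('"res_partner"."name"')
#   -> 'unaccent("res_partner"."name")'
# otherwise the fragment is returned unchanged.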
# --------------------------------------------------
# ExtendedLeaf class for managing leafs and contexts
# -------------------------------------------------
class ExtendedLeaf(object):
""" Class wrapping a domain leaf, and giving some services and management
    features on it. In particular it manages join contexts to be able to
construct queries through multiple models.
"""
# --------------------------------------------------
# Join / Context manipulation
# running examples:
# - res_users.name, like, foo: name is on res_partner, not on res_users
# - res_partner.bank_ids.name, like, foo: bank_ids is a one2many with _auto_join
# - res_partner.state_id.name, like, foo: state_id is a many2one with _auto_join
# A join:
# - link between src_table and dst_table, using src_field and dst_field
# i.e.: inherits: res_users.partner_id = res_partner.id
# i.e.: one2many: res_partner.id = res_partner_bank.partner_id
# i.e.: many2one: res_partner.state_id = res_country_state.id
# - done in the context of a field
# i.e.: inherits: 'partner_id'
# i.e.: one2many: 'bank_ids'
# i.e.: many2one: 'state_id'
# - table names use aliases: initial table followed by the context field
# names, joined using a '__'
# i.e.: inherits: res_partner as res_users__partner_id
# i.e.: one2many: res_partner_bank as res_partner__bank_ids
# i.e.: many2one: res_country_state as res_partner__state_id
# - join condition use aliases
# i.e.: inherits: res_users.partner_id = res_users__partner_id.id
    #    i.e.: one2many: res_partner.id = res_partner__bank_ids.partner_id
# i.e.: many2one: res_partner.state_id = res_partner__state_id.id
# Variables explanation:
# - src_table: working table before the join
# -> res_users, res_partner, res_partner
# - dst_table: working table after the join
# -> res_partner, res_partner_bank, res_country_state
# - src_table_link_name: field name used to link the src table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'partner_id', found in the inherits of the current table
# i.e.: one2many: 'id', not a field
# i.e.: many2one: 'state_id', the current field name
# - dst_table_link_name: field name used to link the dst table, not
# necessarily a field (because 'id' is not a field instance)
# i.e.: inherits: 'id', not a field
# i.e.: one2many: 'partner_id', _fields_id of the current field
# i.e.: many2one: 'id', not a field
# - context_field_name: field name used as a context to make the alias
# i.e.: inherits: 'partner_id': found in the inherits of the current table
# i.e.: one2many: 'bank_ids': current field name
# i.e.: many2one: 'state_id': current field name
# --------------------------------------------------
def __init__(self, leaf, model, join_context=None):
""" Initialize the ExtendedLeaf
:attr [string, tuple] leaf: operator or tuple-formatted domain
expression
:attr obj model: current working model
:attr list _models: list of chained models, updated when
adding joins
:attr list join_context: list of join contexts. This is a list of
tuples like ``(lhs, table, lhs_col, col, link)``
where
lhs
source (left hand) model
model
destination (right hand) model
lhs_col
source model column for join condition
col
destination model column for join condition
link
link column between source and destination model
that is not necessarily (but generally) a real column used
in the condition (i.e. in many2one); this link is used to
compute aliases
"""
assert isinstance(model, BaseModel), 'Invalid leaf creation without table'
self.join_context = join_context or []
self.leaf = leaf
# normalize the leaf's operator
self.normalize_leaf()
# set working variables; handle the context stack and previous tables
self.model = model
self._models = []
for item in self.join_context:
self._models.append(item[0])
self._models.append(model)
# check validity
self.check_leaf()
def __str__(self):
return '<osv.ExtendedLeaf: %s on %s (ctx: %s)>' % (str(self.leaf), self.model._table, ','.join(self._get_context_debug()))
def generate_alias(self):
links = [(context[1]._table, context[4]) for context in self.join_context]
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
return alias
def add_join_context(self, model, lhs_col, table_col, link):
""" See above comments for more details. A join context is a tuple like:
``(lhs, model, lhs_col, col, link)``
After adding the join, the model of the current leaf is updated.
"""
self.join_context.append((self.model, model, lhs_col, table_col, link))
self._models.append(model)
self.model = model
def get_join_conditions(self):
conditions = []
alias = self._models[0]._table
for context in self.join_context:
previous_alias = alias
alias += '__' + context[4]
conditions.append('"%s"."%s"="%s"."%s"' % (previous_alias, context[2], alias, context[3]))
return conditions
def get_tables(self):
tables = set()
links = []
for context in self.join_context:
links.append((context[1]._table, context[4]))
alias, alias_statement = generate_table_alias(self._models[0]._table, links)
tables.add(alias_statement)
return tables
def _get_context_debug(self):
names = ['"%s"."%s"="%s"."%s" (%s)' % (item[0]._table, item[2], item[1]._table, item[3], item[4]) for item in self.join_context]
return names
# --------------------------------------------------
# Leaf manipulation
# --------------------------------------------------
def check_leaf(self):
""" Leaf validity rules:
- a valid leaf is an operator or a leaf
            - a valid leaf has a field object unless
- it is not a tuple
- it is an inherited field
- left is id, operator is 'child_of'
- left is in MAGIC_COLUMNS
"""
if not is_operator(self.leaf) and not is_leaf(self.leaf, True):
raise ValueError("Invalid leaf %s" % str(self.leaf))
def is_operator(self):
return is_operator(self.leaf)
def is_true_leaf(self):
return self.leaf == TRUE_LEAF
def is_false_leaf(self):
return self.leaf == FALSE_LEAF
def is_leaf(self, internal=False):
return is_leaf(self.leaf, internal=internal)
def normalize_leaf(self):
self.leaf = normalize_leaf(self.leaf)
return True
def create_substitution_leaf(leaf, new_elements, new_model=None):
""" From a leaf, create a new leaf (based on the new_elements tuple
and new_model), that will have the same join context. Used to
insert equivalent leafs in the processing stack. """
if new_model is None:
new_model = leaf.model
new_join_context = [tuple(context) for context in leaf.join_context]
new_leaf = ExtendedLeaf(new_elements, new_model, join_context=new_join_context)
return new_leaf
class expression(object):
""" Parse a domain expression
Use a real polish notation
Leafs are still in a ('foo', '=', 'bar') format
For more info: http://christophe-simonis-at-tiny.blogspot.com/2008/08/new-new-domain-notation.html
"""
def __init__(self, cr, uid, exp, table, context):
""" Initialize expression object and automatically parse the expression
right after initialization.
        :param exp: expression (using domain ('foo', '=', 'bar') format)
:param table: root model
:attr list result: list that will hold the result of the parsing
as a list of ExtendedLeaf
:attr list joins: list of join conditions, such as
(res_country_state."id" = res_partner."state_id")
:attr root_model: base model for the query
:attr list expression: the domain expression, that will be normalized
and prepared
"""
self._unaccent = get_unaccent_wrapper(cr)
self.joins = []
self.root_model = table
# normalize and prepare the expression for parsing
self.expression = distribute_not(normalize_domain(exp))
# parse the domain expression
self.parse(cr, uid, context=context)
# ----------------------------------------
# Leafs management
# ----------------------------------------
def get_tables(self):
""" Returns the list of tables for SQL queries, like select from ... """
tables = []
for leaf in self.result:
for table in leaf.get_tables():
if table not in tables:
tables.append(table)
table_name = _quote(self.root_model._table)
if table_name not in tables:
tables.append(table_name)
return tables
# ----------------------------------------
# Parsing
# ----------------------------------------
def parse(self, cr, uid, context):
""" Transform the leaves of the expression
The principle is to pop elements from a leaf stack one at a time.
Each leaf is processed. The processing is a if/elif list of various
cases that appear in the leafs (many2one, function fields, ...).
Two things can happen as a processing result:
- the leaf has been modified and/or new leafs have to be introduced
in the expression; they are pushed into the leaf stack, to be
processed right after
- the leaf is added to the result
Some internal var explanation:
:var list path: left operand seen as a sequence of field names
("foo.bar" -> ["foo", "bar"])
:var obj model: model object, model containing the field
(the name provided in the left operand)
:var obj field: the field corresponding to `path[0]`
:var obj column: the column corresponding to `path[0]`
:var obj comodel: relational model of field (field.comodel)
(res_partner.bank_ids -> res.partner.bank)
"""
def to_ids(value, comodel, context=None, limit=None):
""" Normalize a single id or name, or a list of those, into a list of ids
:param {int,long,basestring,list,tuple} value:
if int, long -> return [value]
if basestring, convert it into a list of basestrings, then
if list of basestring ->
perform a name_search on comodel for each name
return the list of related ids
"""
names = []
if isinstance(value, basestring):
names = [value]
elif value and isinstance(value, (tuple, list)) and all(isinstance(item, basestring) for item in value):
names = value
elif isinstance(value, (int, long)):
return [value]
if names:
name_get_list = [name_get[0] for name in names for name_get in comodel.name_search(cr, uid, name, [], 'ilike', context=context, limit=limit)]
return list(set(name_get_list))
return list(value)
def child_of_domain(left, ids, left_model, parent=None, prefix='', context=None):
""" Return a domain implementing the child_of operator for [(left,child_of,ids)],
either as a range using the parent_left/right tree lookup fields
(when available), or as an expanded [(left,in,child_ids)] """
if left_model._parent_store and (not left_model.pool._init):
# TODO: Improve where joins are implemented for many with '.', replace by:
# doms += ['&',(prefix+'.parent_left','<',o.parent_right),(prefix+'.parent_left','>=',o.parent_left)]
doms = []
for o in left_model.browse(cr, uid, ids, context=context):
if doms:
doms.insert(0, OR_OPERATOR)
doms += [AND_OPERATOR, ('parent_left', '<', o.parent_right), ('parent_left', '>=', o.parent_left)]
if prefix:
return [(left, 'in', left_model.search(cr, uid, doms, context=context))]
return doms
else:
def recursive_children(ids, model, parent_field):
if not ids:
return []
ids2 = model.search(cr, uid, [(parent_field, 'in', ids)], context=context)
return ids + recursive_children(ids2, model, parent_field)
return [(left, 'in', recursive_children(ids, left_model, parent or left_model._parent_name))]
def pop():
""" Pop a leaf to process. """
return self.stack.pop()
def push(leaf):
""" Push a leaf to be processed right after. """
self.stack.append(leaf)
def push_result(leaf):
""" Push a leaf to the results. This leaf has been fully processed
and validated. """
self.result.append(leaf)
self.result = []
self.stack = [ExtendedLeaf(leaf, self.root_model) for leaf in self.expression]
# process from right to left; expression is from left to right
self.stack.reverse()
while self.stack:
# Get the next leaf to process
leaf = pop()
# Get working variables
if leaf.is_operator():
left, operator, right = leaf.leaf, None, None
elif leaf.is_true_leaf() or leaf.is_false_leaf():
# because we consider left as a string
left, operator, right = ('%s' % leaf.leaf[0], leaf.leaf[1], leaf.leaf[2])
else:
left, operator, right = leaf.leaf
path = left.split('.', 1)
model = leaf.model
field = model._fields.get(path[0])
column = model._columns.get(path[0])
comodel = model.pool.get(getattr(field, 'comodel_name', None))
# ----------------------------------------
# SIMPLE CASE
# 1. leaf is an operator
# 2. leaf is a true/false leaf
# -> add directly to result
# ----------------------------------------
if leaf.is_operator() or leaf.is_true_leaf() or leaf.is_false_leaf():
push_result(leaf)
# ----------------------------------------
# FIELD NOT FOUND
# -> from inherits'd fields -> work on the related model, and add
# a join condition
# -> ('id', 'child_of', '..') -> use a 'to_ids'
# -> but is one on the _log_access special fields, add directly to
# result
# TODO: make these fields explicitly available in self.columns instead!
# -> else: crash
# ----------------------------------------
elif not column and path[0] in model._inherit_fields:
# comments about inherits'd fields
# { 'field_name': ('parent_model', 'm2o_field_to_reach_parent',
                #                     field_column_obj, original_parent_model), ... }
next_model = model.pool[model._inherit_fields[path[0]][0]]
leaf.add_join_context(next_model, model._inherits[next_model._name], 'id', model._inherits[next_model._name])
push(leaf)
elif left == 'id' and operator == 'child_of':
ids2 = to_ids(right, model, context)
dom = child_of_domain(left, ids2, model)
for dom_leaf in reversed(dom):
new_leaf = create_substitution_leaf(leaf, dom_leaf, model)
push(new_leaf)
elif not column and path[0] in MAGIC_COLUMNS:
push_result(leaf)
elif not field:
raise ValueError("Invalid field %r in leaf %r" % (left, str(leaf)))
# ----------------------------------------
# PATH SPOTTED
# -> many2one or one2many with _auto_join:
# - add a join, then jump into linked column: column.remaining on
# src_table is replaced by remaining on dst_table, and set for re-evaluation
# - if a domain is defined on the column, add it into evaluation
# on the relational table
# -> many2one, many2many, one2many: replace by an equivalent computed
# domain, given by recursively searching on the remaining of the path
# -> note: hack about columns.property should not be necessary anymore
# as after transforming the column, it will go through this loop once again
# ----------------------------------------
elif len(path) > 1 and column._type == 'many2one' and column._auto_join:
# res_partner.state_id = res_partner__state_id.id
leaf.add_join_context(comodel, path[0], 'id', path[0])
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
elif len(path) > 1 and column._type == 'one2many' and column._auto_join:
# res_partner.id = res_partner__bank_ids.partner_id
leaf.add_join_context(comodel, 'id', column._fields_id, path[0])
domain = column._domain(model) if callable(column._domain) else column._domain
push(create_substitution_leaf(leaf, (path[1], operator, right), comodel))
if domain:
domain = normalize_domain(domain)
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, comodel))
push(create_substitution_leaf(leaf, AND_OPERATOR, comodel))
elif len(path) > 1 and column._auto_join:
raise NotImplementedError('_auto_join attribute not supported on many2many column %s' % left)
elif len(path) > 1 and column._type == 'many2one':
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
leaf.leaf = (path[0], 'in', right_ids)
push(leaf)
# Making search easier when there is a left operand as column.o2m or column.m2m
elif len(path) > 1 and column._type in ['many2many', 'one2many']:
right_ids = comodel.search(cr, uid, [(path[1], operator, right)], context=context)
table_ids = model.search(cr, uid, [(path[0], 'in', right_ids)], context=dict(context, active_test=False))
leaf.leaf = ('id', 'in', table_ids)
push(leaf)
elif not column:
# Non-stored field should provide an implementation of search.
if not field.search:
# field does not support search!
_logger.error("Non-stored field %s cannot be searched.", field)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# Ignore it: generate a dummy leaf.
domain = []
else:
# Let the field generate a domain.
recs = model.browse(cr, uid, [], context)
domain = field.determine_domain(recs, operator, right)
if not domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
for elem in reversed(domain):
push(create_substitution_leaf(leaf, elem, model))
# -------------------------------------------------
# FUNCTION FIELD
# -> not stored: error if no _fnct_search, otherwise handle the result domain
# -> stored: management done in the remaining of parsing
# -------------------------------------------------
elif isinstance(column, fields.function) and not column.store:
# this is a function field that is not stored
if not column._fnct_search:
_logger.error(
"Field '%s' (%s) can not be searched: "
"non-stored function field without fnct_search",
column.string, left)
# avoid compiling stack trace if not needed
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(''.join(traceback.format_stack()))
# ignore it: generate a dummy leaf
fct_domain = []
else:
fct_domain = column.search(cr, uid, model, left, [leaf.leaf], context=context)
if not fct_domain:
leaf.leaf = TRUE_LEAF
push(leaf)
else:
# we assume that the expression is valid
# we create a dummy leaf for forcing the parsing of the resulting expression
for domain_element in reversed(fct_domain):
push(create_substitution_leaf(leaf, domain_element, model))
# self.push(create_substitution_leaf(leaf, TRUE_LEAF, model))
# self.push(create_substitution_leaf(leaf, AND_OPERATOR, model))
# -------------------------------------------------
# RELATIONAL FIELDS
# -------------------------------------------------
# Applying recursivity on field(one2many)
elif column._type == 'one2many' and operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
elif column._type == 'one2many':
call_null = True
if right is not False:
if isinstance(right, basestring):
ids2 = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context, limit=None)]
if ids2:
operator = 'in'
elif isinstance(right, collections.Iterable):
ids2 = right
else:
ids2 = [right]
if not ids2:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
ids2 = select_from_where(cr, column._fields_id, comodel._table, 'id', ids2, operator)
if ids2:
call_null = False
o2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', o2m_op, ids2), model))
if call_null:
o2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', o2m_op, select_distinct_from_where_not_null(cr, column._fields_id, comodel._table)), model))
elif column._type == 'many2many':
rel_table, rel_id1, rel_id2 = column._sql_names(model)
#FIXME
if operator == 'child_of':
def _rec_convert(ids):
if comodel == model:
return ids
return select_from_where(cr, rel_id1, rel_table, rel_id2, ids, operator)
ids2 = to_ids(right, comodel, context)
dom = child_of_domain('id', ids2, comodel)
ids2 = comodel.search(cr, uid, dom, context=context)
push(create_substitution_leaf(leaf, ('id', 'in', _rec_convert(ids2)), model))
else:
call_null_m2m = True
if right is not False:
if isinstance(right, basestring):
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, context=context)]
if res_ids:
operator = 'in'
else:
if not isinstance(right, list):
res_ids = [right]
else:
res_ids = right
if not res_ids:
if operator in ['like', 'ilike', 'in', '=']:
#no result found with given search criteria
call_null_m2m = False
push(create_substitution_leaf(leaf, FALSE_LEAF, model))
else:
operator = 'in' # operator changed because ids are directly related to main object
else:
call_null_m2m = False
m2m_op = 'not in' if operator in NEGATIVE_TERM_OPERATORS else 'in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_from_where(cr, rel_id1, rel_table, rel_id2, res_ids, operator) or [0]), model))
if call_null_m2m:
m2m_op = 'in' if operator in NEGATIVE_TERM_OPERATORS else 'not in'
push(create_substitution_leaf(leaf, ('id', m2m_op, select_distinct_from_where_not_null(cr, rel_id1, rel_table)), model))
elif column._type == 'many2one':
if operator == 'child_of':
ids2 = to_ids(right, comodel, context)
if column._obj != model._name:
dom = child_of_domain(left, ids2, comodel, prefix=column._obj)
else:
dom = child_of_domain('id', ids2, model, parent=left)
for dom_leaf in reversed(dom):
push(create_substitution_leaf(leaf, dom_leaf, model))
else:
def _get_expression(comodel, cr, uid, left, right, operator, context=None):
if context is None:
context = {}
c = context.copy()
c['active_test'] = False
                    # Special treatment for ill-formed domains
operator = (operator in ['<', '>', '<=', '>=']) and 'in' or operator
dict_op = {'not in': '!=', 'in': '=', '=': 'in', '!=': 'not in'}
if isinstance(right, tuple):
right = list(right)
if (not isinstance(right, list)) and operator in ['not in', 'in']:
operator = dict_op[operator]
elif isinstance(right, list) and operator in ['!=', '=']: # for domain (FIELD,'=',['value1','value2'])
operator = dict_op[operator]
res_ids = [x[0] for x in comodel.name_search(cr, uid, right, [], operator, limit=None, context=c)]
if operator in NEGATIVE_TERM_OPERATORS:
res_ids.append(False) # TODO this should not be appended if False was in 'right'
return left, 'in', res_ids
# resolve string-based m2o criterion into IDs
if isinstance(right, basestring) or \
right and isinstance(right, (tuple, list)) and all(isinstance(item, basestring) for item in right):
push(create_substitution_leaf(leaf, _get_expression(comodel, cr, uid, left, right, operator, context=context), model))
else:
# right == [] or right == False and all other cases are handled by __leaf_to_sql()
push_result(leaf)
# -------------------------------------------------
# OTHER FIELDS
# -> datetime fields: manage time part of the datetime
# column when it is not there
# -> manage translatable fields
# -------------------------------------------------
else:
if column._type == 'datetime' and right and len(right) == 10:
if operator in ('>', '<='):
right += ' 23:59:59'
else:
right += ' 00:00:00'
push(create_substitution_leaf(leaf, (left, operator, right), model))
elif column.translate and right:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
if need_wildcard:
right = '%%%s%%' % right
inselect_operator = 'inselect'
if sql_operator in NEGATIVE_TERM_OPERATORS:
# negate operator (fix lp:1071710)
sql_operator = sql_operator[4:] if sql_operator[:3] == 'not' else '='
inselect_operator = 'not inselect'
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
instr = unaccent('%s')
if sql_operator == 'in':
# params will be flatten by to_sql() => expand the placeholders
instr = '(%s)' % ', '.join(['%s'] * len(right))
subselect = """WITH temp_irt_current (id, name) as (
SELECT ct.id, coalesce(it.value,ct.{quote_left})
FROM {current_table} ct
LEFT JOIN ir_translation it ON (it.name = %s and
it.lang = %s and
it.type = %s and
it.res_id = ct.id and
it.value != '')
)
SELECT id FROM temp_irt_current WHERE {name} {operator} {right} order by name
""".format(current_table=model._table, quote_left=_quote(left), name=unaccent('name'),
operator=sql_operator, right=instr)
params = (
model._name + ',' + left,
context.get('lang') or 'en_US',
'model',
right,
)
push(create_substitution_leaf(leaf, ('id', inselect_operator, (subselect, params)), model))
else:
push_result(leaf)
# ----------------------------------------
# END OF PARSING FULL DOMAIN
# -> generate joins
# ----------------------------------------
joins = set()
for leaf in self.result:
joins |= set(leaf.get_join_conditions())
self.joins = list(joins)
def __leaf_to_sql(self, eleaf):
model = eleaf.model
leaf = eleaf.leaf
left, operator, right = leaf
# final sanity checks - should never fail
assert operator in (TERM_OPERATORS + ('inselect', 'not inselect')), \
"Invalid operator %r in domain term %r" % (operator, leaf)
assert leaf in (TRUE_LEAF, FALSE_LEAF) or left in model._fields \
or left in MAGIC_COLUMNS, "Invalid field %r in domain term %r" % (left, leaf)
assert not isinstance(right, BaseModel), \
"Invalid value %r in domain term %r" % (right, leaf)
table_alias = '"%s"' % (eleaf.generate_alias())
if leaf == TRUE_LEAF:
query = 'TRUE'
params = []
elif leaf == FALSE_LEAF:
query = 'FALSE'
params = []
elif operator == 'inselect':
query = '(%s."%s" in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator == 'not inselect':
query = '(%s."%s" not in (%s))' % (table_alias, left, right[0])
params = right[1]
elif operator in ['in', 'not in']:
# Two cases: right is a boolean or a list. The boolean case is an
# abuse and handled for backward compatibility.
if isinstance(right, bool):
_logger.warning("The domain term '%s' should use the '=' or '!=' operator." % (leaf,))
if operator == 'in':
r = 'NOT NULL' if right else 'NULL'
else:
r = 'NULL' if right else 'NOT NULL'
query = '(%s."%s" IS %s)' % (table_alias, left, r)
params = []
elif isinstance(right, (list, tuple)):
params = list(right)
check_nulls = False
for i in range(len(params))[::-1]:
if params[i] == False:
check_nulls = True
del params[i]
if params:
if left == 'id':
instr = ','.join(['%s'] * len(params))
else:
ss = model._columns[left]._symbol_set
instr = ','.join([ss[0]] * len(params))
params = map(ss[1], params)
query = '(%s."%s" %s (%s))' % (table_alias, left, operator, instr)
else:
# The case for (left, 'in', []) or (left, 'not in', []).
query = 'FALSE' if operator == 'in' else 'TRUE'
if check_nulls and operator == 'in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif not check_nulls and operator == 'not in':
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
elif check_nulls and operator == 'not in':
query = '(%s AND %s."%s" IS NOT NULL)' % (query, table_alias, left) # needed only for TRUE.
else: # Must not happen
raise ValueError("Invalid domain term %r" % (leaf,))
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '='):
query = '(%s."%s" IS NULL or %s."%s" = false )' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '='):
query = '%s."%s" IS NULL ' % (table_alias, left)
params = []
elif right == False and (left in model._columns) and model._columns[left]._type == "boolean" and (operator == '!='):
query = '(%s."%s" IS NOT NULL and %s."%s" != false)' % (table_alias, left, table_alias, left)
params = []
elif (right is False or right is None) and (operator == '!='):
query = '%s."%s" IS NOT NULL' % (table_alias, left)
params = []
elif operator == '=?':
if right is False or right is None:
# '=?' is a short-circuit that makes the term TRUE if right is None or False
query = 'TRUE'
params = []
else:
# '=?' behaves like '=' in other cases
query, params = self.__leaf_to_sql(
create_substitution_leaf(eleaf, (left, '=', right), model))
elif left == 'id':
query = '%s.id %s %%s' % (table_alias, operator)
params = right
else:
need_wildcard = operator in ('like', 'ilike', 'not like', 'not ilike')
sql_operator = {'=like': 'like', '=ilike': 'ilike'}.get(operator, operator)
cast = '::text' if sql_operator.endswith('like') else ''
if left in model._columns:
format = need_wildcard and '%s' or model._columns[left]._symbol_set[0]
unaccent = self._unaccent if sql_operator.endswith('like') else lambda x: x
column = '%s.%s' % (table_alias, _quote(left))
query = '(%s %s %s)' % (unaccent(column + cast), sql_operator, unaccent(format))
elif left in MAGIC_COLUMNS:
query = "(%s.\"%s\"%s %s %%s)" % (table_alias, left, cast, sql_operator)
params = right
else: # Must not happen
raise ValueError("Invalid field %r in domain term %r" % (left, leaf))
add_null = False
if need_wildcard:
if isinstance(right, str):
str_utf8 = right
elif isinstance(right, unicode):
str_utf8 = right.encode('utf-8')
else:
str_utf8 = str(right)
params = '%%%s%%' % str_utf8
add_null = not str_utf8
elif left in model._columns:
params = model._columns[left]._symbol_set[1](right)
if add_null:
query = '(%s OR %s."%s" IS NULL)' % (query, table_alias, left)
if isinstance(params, basestring):
params = [params]
return query, params
def to_sql(self):
stack = []
params = []
# Process the domain from right to left, using a stack, to generate a SQL expression.
self.result.reverse()
for leaf in self.result:
if leaf.is_leaf(internal=True):
q, p = self.__leaf_to_sql(leaf)
params.insert(0, p)
stack.append(q)
elif leaf.leaf == NOT_OPERATOR:
stack.append('(NOT (%s))' % (stack.pop(),))
else:
ops = {AND_OPERATOR: ' AND ', OR_OPERATOR: ' OR '}
q1 = stack.pop()
q2 = stack.pop()
stack.append('(%s %s %s)' % (q1, ops[leaf.leaf], q2,))
assert len(stack) == 1
query = stack[0]
joins = ' AND '.join(self.joins)
if joins:
query = '(%s) AND %s' % (joins, query)
return query, tools.flatten(params)
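    # Illustrative usage sketch (added for clarity; the cursor, uid and the
    # way the model object is obtained are assumptions, not part of this
    # module):
    #   e = expression(cr, uid, [('name', 'ilike', 'foo')], partner_model, {})
    #   where_clause, where_params = e.to_sql()
    #   from_clause = ', '.join(e.get_tables())
    #   cr.execute('SELECT id FROM %s WHERE %s' % (from_clause, where_clause),
    #              where_params)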
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sss/calibre-at-bzr | refs/heads/upstream/master | src/calibre/ebooks/metadata/haodoo.py | 24 | # -*- coding: utf-8 -*-
'''
Read meta information from Haodoo.net pdb files.
'''
__license__ = 'GPL v3'
__copyright__ = '2012, Kan-Ru Chen <kanru@kanru.info>'
__docformat__ = 'restructuredtext en'
from calibre.ebooks.pdb.header import PdbHeaderReader
from calibre.ebooks.pdb.haodoo.reader import Reader
def get_metadata(stream, extract_cover=True):
'''
Return metadata as a L{MetaInfo} object
'''
stream.seek(0)
pheader = PdbHeaderReader(stream)
reader = Reader(pheader, stream, None, None)
return reader.get_metadata()
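# Illustrative usage (a hedged sketch; the file name is an assumption):
#   with open('book.pdb', 'rb') as stream:
#       mi = get_metadata(stream)
#       print(mi.title)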
|
yaii/yai | refs/heads/alpha | share/symbols/i18n.py | 4 | #!/usr/bin/env python
# Extract the <title> text of every symbol in the given SVG files and emit
# the strings as an NC_()-wrapped C array so gettext can collect them for
# translation.
from xml.dom import minidom
import sys
sys.stdout.write("char * stringlst = [")
for filename in sys.argv[1:]:
doc = minidom.parse(filename)
symbols = doc.getElementsByTagName('title')
if symbols:
for symbol in symbols:
sys.stdout.write("\n/* Symbols: " + filename + " */ NC_(\"Symbol\", \"" + symbol.firstChild.nodeValue + "\"),")
sys.stdout.write("];")
|
xaviercobain88/framework-python | refs/heads/master | build/lib.linux-i686-2.7/openerp/addons/base/ir/ir_attachment.py | 12 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hashlib
import itertools
import logging
import os
import re
from openerp import tools
from openerp.osv import fields,osv
_logger = logging.getLogger(__name__)
class ir_attachment(osv.osv):
"""Attachments are used to link binary files or url to any openerp document.
External attachment storage
---------------------------
The 'data' function field (_data_get,data_set) is implemented using
_file_read, _file_write and _file_delete which can be overridden to
    implement other storage engines; such methods should check for other
    location pseudo-URIs (example: hdfs://hadoopserver)
    The default implementation is the file:dirname location that stores files
    on the local filesystem using names based on their sha1 hash
"""
def _name_get_resname(self, cr, uid, ids, object, method, context):
data = {}
for attachment in self.browse(cr, uid, ids, context=context):
model_object = attachment.res_model
res_id = attachment.res_id
if model_object and res_id:
model_pool = self.pool.get(model_object)
res = model_pool.name_get(cr,uid,[res_id],context)
res_name = res and res[0][1] or False
if res_name:
field = self._columns.get('res_name',False)
if field and len(res_name) > field.size:
res_name = res_name[:field.size-3] + '...'
data[attachment.id] = res_name
else:
data[attachment.id] = False
return data
# 'data' field implementation
def _full_path(self, cr, uid, location, path):
# location = 'file:filestore'
assert location.startswith('file:'), "Unhandled filestore location %s" % location
location = location[5:]
# sanitize location name and path
location = re.sub('[.]','',location)
location = location.strip('/\\')
path = re.sub('[.]','',path)
path = path.strip('/\\')
return os.path.join(tools.config['root_path'], location, cr.dbname, path)
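    # Illustrative example (added for clarity; the concrete values are
    # assumptions): with location 'file:filestore', dbname 'prod' and path
    # 'da3/da39a3...', the computed path is
    # <root_path>/filestore/prod/da3/da39a3...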
def _file_read(self, cr, uid, location, fname, bin_size=False):
full_path = self._full_path(cr, uid, location, fname)
r = ''
try:
if bin_size:
r = os.path.getsize(full_path)
else:
r = open(full_path,'rb').read().encode('base64')
except IOError:
_logger.error("_read_file reading %s",full_path)
return r
def _file_write(self, cr, uid, location, value):
bin_value = value.decode('base64')
fname = hashlib.sha1(bin_value).hexdigest()
        # scatter files across 4096 dirs (16**3 three-hex-char prefixes)
# we use '/' in the db (even on windows)
fname = fname[:3] + '/' + fname
full_path = self._full_path(cr, uid, location, fname)
try:
dirname = os.path.dirname(full_path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
open(full_path,'wb').write(bin_value)
except IOError:
_logger.error("_file_write writing %s",full_path)
return fname
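    # Illustrative example (added for clarity): a payload whose sha1 hex
    # digest starts with 'da39a3' is stored under 'da3/da39a3...', so the
    # first three hex characters shard the filestore into the directories
    # mentioned above.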
def _file_delete(self, cr, uid, location, fname):
count = self.search(cr, 1, [('store_fname','=',fname)], count=True)
if count <= 1:
full_path = self._full_path(cr, uid, location, fname)
try:
os.unlink(full_path)
except OSError:
_logger.error("_file_delete could not unlink %s",full_path)
except IOError:
# Harmless and needed for race conditions
_logger.error("_file_delete could not unlink %s",full_path)
def _data_get(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
result = {}
location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
bin_size = context.get('bin_size')
for attach in self.browse(cr, uid, ids, context=context):
if location and attach.store_fname:
result[attach.id] = self._file_read(cr, uid, location, attach.store_fname, bin_size)
else:
result[attach.id] = attach.db_datas
return result
def _data_set(self, cr, uid, id, name, value, arg, context=None):
        # We don't handle setting data to null
if not value:
return True
if context is None:
context = {}
location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
file_size = len(value.decode('base64'))
if location:
attach = self.browse(cr, uid, id, context=context)
if attach.store_fname:
self._file_delete(cr, uid, location, attach.store_fname)
fname = self._file_write(cr, uid, location, value)
super(ir_attachment, self).write(cr, uid, [id], {'store_fname': fname, 'file_size': file_size}, context=context)
else:
super(ir_attachment, self).write(cr, uid, [id], {'db_datas': value, 'file_size': file_size}, context=context)
return True
_name = 'ir.attachment'
_columns = {
'name': fields.char('Attachment Name',size=256, required=True),
'datas_fname': fields.char('File Name',size=256),
'description': fields.text('Description'),
'res_name': fields.function(_name_get_resname, type='char', size=128, string='Resource Name', store=True),
'res_model': fields.char('Resource Model',size=64, readonly=True, help="The database object this attachment will be attached to"),
'res_id': fields.integer('Resource ID', readonly=True, help="The record id this is attached to"),
'create_date': fields.datetime('Date Created', readonly=True),
'create_uid': fields.many2one('res.users', 'Owner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', change_default=True),
'type': fields.selection( [ ('url','URL'), ('binary','Binary'), ],
'Type', help="Binary File or URL", required=True, change_default=True),
'url': fields.char('Url', size=1024),
# al: We keep shitty field names for backward compatibility with document
'datas': fields.function(_data_get, fnct_inv=_data_set, string='File Content', type="binary", nodrop=True),
'store_fname': fields.char('Stored Filename', size=256),
'db_datas': fields.binary('Database Data'),
'file_size': fields.integer('File Size'),
}
_defaults = {
'type': 'binary',
'file_size': 0,
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'ir.attachment', context=c),
}
def _auto_init(self, cr, context=None):
super(ir_attachment, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('ir_attachment_res_idx',))
if not cr.fetchone():
cr.execute('CREATE INDEX ir_attachment_res_idx ON ir_attachment (res_model, res_id)')
cr.commit()
def check(self, cr, uid, ids, mode, context=None, values=None):
"""Restricts the access to an ir.attachment, according to referred model
In the 'document' module, it is overriden to relax this hard rule, since
more complex ones apply there.
"""
if not ids:
return
res_ids = {}
if ids:
if isinstance(ids, (int, long)):
ids = [ids]
cr.execute('SELECT DISTINCT res_model, res_id FROM ir_attachment WHERE id = ANY (%s)', (ids,))
for rmod, rid in cr.fetchall():
if not (rmod and rid):
continue
res_ids.setdefault(rmod,set()).add(rid)
if values:
if values.get('res_model') and 'res_id' in values:
res_ids.setdefault(values['res_model'],set()).add(values['res_id'])
ima = self.pool.get('ir.model.access')
for model, mids in res_ids.items():
# ignore attachments that are not attached to a resource anymore when checking access rights
# (resource was deleted but attachment was not)
mids = self.pool.get(model).exists(cr, uid, mids)
ima.check(cr, uid, model, mode)
self.pool.get(model).check_access_rule(cr, uid, mids, mode, context=context)
def _search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
ids = super(ir_attachment, self)._search(cr, uid, args, offset=offset,
limit=limit, order=order,
context=context, count=False,
access_rights_uid=access_rights_uid)
if not ids:
if count:
return 0
return []
# Work with a set, as list.remove() is prohibitive for large lists of documents
# (takes 20+ seconds on a db with 100k docs during search_count()!)
orig_ids = ids
ids = set(ids)
# For attachments, the permissions of the document they are attached to
# apply, so we must remove attachments for which the user cannot access
# the linked document.
# Use pure SQL rather than read() as it is about 50% faster for large dbs (100k+ docs),
# and the permissions are checked in super() and below anyway.
cr.execute("""SELECT id, res_model, res_id FROM ir_attachment WHERE id = ANY(%s)""", (list(ids),))
targets = cr.dictfetchall()
model_attachments = {}
for target_dict in targets:
if not (target_dict['res_id'] and target_dict['res_model']):
continue
# model_attachments = { 'model': { 'res_id': [id1,id2] } }
model_attachments.setdefault(target_dict['res_model'],{}).setdefault(target_dict['res_id'],set()).add(target_dict['id'])
# To avoid multiple queries for each attachment found, checks are
# performed in batch as much as possible.
ima = self.pool.get('ir.model.access')
for model, targets in model_attachments.iteritems():
if not ima.check(cr, uid, model, 'read', False):
# remove all corresponding attachment ids
for attach_id in itertools.chain(*targets.values()):
ids.remove(attach_id)
continue # skip ir.rule processing, these ones are out already
# filter ids according to what access rules permit
target_ids = targets.keys()
allowed_ids = self.pool.get(model).search(cr, uid, [('id', 'in', target_ids)], context=context)
disallowed_ids = set(target_ids).difference(allowed_ids)
for res_id in disallowed_ids:
for attach_id in targets[res_id]:
ids.remove(attach_id)
# sort result according to the original sort ordering
result = [id for id in orig_ids if id in ids]
return len(result) if count else list(result)
def read(self, cr, uid, ids, fields_to_read=None, context=None, load='_classic_read'):
self.check(cr, uid, ids, 'read', context=context)
return super(ir_attachment, self).read(cr, uid, ids, fields_to_read, context, load)
def write(self, cr, uid, ids, vals, context=None):
self.check(cr, uid, ids, 'write', context=context, values=vals)
if 'file_size' in vals:
del vals['file_size']
return super(ir_attachment, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
self.check(cr, uid, [id], 'write', context=context)
return super(ir_attachment, self).copy(cr, uid, id, default, context)
def unlink(self, cr, uid, ids, context=None):
self.check(cr, uid, ids, 'unlink', context=context)
location = self.pool.get('ir.config_parameter').get_param(cr, uid, 'ir_attachment.location')
if location:
for attach in self.browse(cr, uid, ids, context=context):
if attach.store_fname:
self._file_delete(cr, uid, location, attach.store_fname)
return super(ir_attachment, self).unlink(cr, uid, ids, context)
def create(self, cr, uid, values, context=None):
self.check(cr, uid, [], mode='create', context=context, values=values)
if 'file_size' in values:
del values['file_size']
return super(ir_attachment, self).create(cr, uid, values, context)
def action_get(self, cr, uid, context=None):
return self.pool.get('ir.actions.act_window').for_xml_id(
cr, uid, 'base', 'action_attachment', context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
pozetroninc/micropython | refs/heads/stable | tests/basics/with_return.py | 82 | class CtxMgr:
def __init__(self, id):
self.id = id
def __enter__(self):
print("__enter__", self.id)
return self
def __exit__(self, a, b, c):
print("__exit__", self.id, repr(a), repr(b))
# simple case
def foo():
with CtxMgr(1):
return 4
print(foo())
# for loop within with (iterator needs removing upon return)
def f():
with CtxMgr(1):
for i in [1, 2]:
return i
print(f())
# multiple for loops within with
def f():
with CtxMgr(1):
for i in [1, 2]:
for j in [3, 4]:
return (i, j)
print(f())
# multiple for loops within nested withs
def f():
with CtxMgr(1):
for i in [1, 2]:
for j in [3, 4]:
with CtxMgr(2):
for k in [5, 6]:
for l in [7, 8]:
return (i, j, k, l)
print(f())
# multiple for loops that are optimised, and nested withs
def f():
with CtxMgr(1):
for i in range(1, 3):
for j in range(3, 5):
with CtxMgr(2):
for k in range(5, 7):
for l in range(7, 9):
return (i, j, k, l)
print(f())
|
guessit-io/guessit | refs/heads/develop | guessit/test/test_main.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, pointless-statement, missing-docstring, invalid-name
import json
import os
import sys
import pytest
from _pytest.capture import CaptureFixture
from ..__main__ import main
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Prevent output from spamming the console
@pytest.fixture(scope="function", autouse=True)
def no_stdout(monkeypatch):
with open(os.devnull, "w") as f:
monkeypatch.setattr(sys, "stdout", f)
yield
def test_main_no_args():
main([])
def test_main():
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv'])
def test_main_unicode():
main(['[阿维达].Avida.2006.FRENCH.DVDRiP.XViD-PROD.avi'])
def test_main_forced_unicode():
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv'])
def test_main_verbose():
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--verbose'])
def test_main_yaml():
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--yaml'])
def test_main_json():
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '--json'])
def test_main_show_property():
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '-P', 'title'])
def test_main_advanced():
main(['Fear.and.Loathing.in.Las.Vegas.FRENCH.ENGLISH.720p.HDDVD.DTS.x264-ESiR.mkv', '-a'])
def test_main_input():
main(['--input', os.path.join(__location__, 'test-input-file.txt')])
def test_main_properties():
main(['-p'])
main(['-p', '--json'])
main(['-p', '--yaml'])
def test_main_values():
main(['-V'])
main(['-V', '--json'])
main(['-V', '--yaml'])
def test_main_help():
with pytest.raises(SystemExit):
main(['--help'])
def test_main_version():
main(['--version'])
def test_json_output_input_string(capsys: CaptureFixture):
main(['--json', '--output-input-string', 'test.avi'])
outerr = capsys.readouterr()
data = json.loads(outerr.out)
assert 'input_string' in data
assert data['input_string'] == 'test.avi'
def test_json_no_output_input_string(capsys: CaptureFixture):
main(['--json', 'test.avi'])
outerr = capsys.readouterr()
data = json.loads(outerr.out)
assert 'input_string' not in data
|
wjw12/emc | refs/heads/master | EMC.py | 1 | """
Implementation of the EMC algorithm for 2D image reconstruction using sparse data
Reference: Hugh T. Philipp, Kartik Ayyer, Mark W. Tate, Veit Elser, and Sol M. Gruner,
"Solving structure with sparse, randomly-oriented x-ray data", OPTICS EXPRESS, 2012
X-ray data can be downloaded from http://cxidb.org/id-18.html
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.ndimage.interpolation import rotate
from scipy.sparse import csc_matrix,lil_matrix
from scipy.ndimage import convolve
__author__ = 'Jiewen_Wang'
class EMC2D():
def __init__(self,model="2.bmp",cxifile='115.cxi',samples=1000):
self.M_ROT = 250
self.M_DATA = samples
self.SCALE = 100000 # normalize factor
self.model,self.WIDTH = self.loadBmp(model)
self.M_PIX = self.WIDTH * self.WIDTH
#self.getDataFromCxi(cxifile,samples)
self.exp_data = load_sparse_csc('exp_data_025.npz')[:,:samples].astype('float64')
self.generateRotationSpace()
# normalize model
self.model = self.normalizeImgArray(self.model)
# normalize experimental data
self.exp_data /= self.SCALE
self.prev_model = 0
def getDataFromCxi(self,cxi,n): # get n frames from cxi file
print ('Getting data from cxi file...')
import h5py
f = h5py.File(cxi)
data = f['entry_1']['instrument_1']['detector_1']['data']
assert (data.shape[1] == self.WIDTH)
self.exp_data = lil_matrix((self.M_PIX,n))
x = 150
for i in range(x):
showProgress(x,i)
self.exp_data[:, i*n//x:(i+1)*n//x] = lil_matrix(data[i*n//x:(i+1)*n//x,:,:].reshape((self.M_PIX,n//x)).astype('float64'))
self.exp_data = self.exp_data.tocsc()
save_sparse_csc('exp_data',self.exp_data)
print ('Data done.')
def loadBmp(self,filename):
import struct as st
f = open(filename,'rb')
data = bytearray(f.read())
offsetToArray = st.unpack_from('I',data,10)[0]
width = st.unpack_from('I',data,18)[0]
height = st.unpack_from('I',data,22)[0]
assert(width == height),"image must have same width and height"
bitsPerPixel = st.unpack_from('H',data,28)[0]
bytesPerPixel = bitsPerPixel // 8
bytesPerRow = bytesPerPixel * width
# store the pixel array (in greyscale)
pixels = []
for row in range(height):
for col in range(width):
offset = offsetToArray + row * bytesPerRow + col * bytesPerPixel
b = st.unpack_from('B',data,offset)[0]
g = st.unpack_from('B',data,offset+1)[0]
r = st.unpack_from('B',data,offset+2)[0]
pixels.append((b+g+r)//3)
pixels = np.array(pixels).reshape((width,width))
return pixels, width
def rotateImg(self,image,angle):
return self.normalizeImgArray( rotate(self.unnormalizeImgArray(image),angle,reshape=False) )
def generateRotationSpace(self):
self.rotation = np.linspace(0,360,self.M_ROT)
def normalizeImgArray(self,img):
return np.float64(img) / self.SCALE
def unnormalizeImgArray(self,img):
return np.float64(img * self.SCALE)
def expand(self):
result = np.zeros((self.M_PIX,self.M_ROT))
for j in range(self.M_ROT):
rot_img = self.rotateImg(self.model,self.rotation[j]).reshape(self.M_PIX,1)
result[:,j] = rot_img[:,0]
self.reference = result
def cond_prob(self):
log_W = np.log(self.reference.T)
log_W[np.isinf(log_W) | np.isnan(log_W)] = -100
W = np.exp(log_W)
A = csc_matrix(log_W) * self.exp_data
A = A.toarray()
prob = A - np.tile(np.sum(W,1), (self.M_DATA,1)).T # log probability
prob -= np.max(prob)
prob = np.exp(prob)
S = np.sum(prob, 0)
prob = prob / np.tile(S, (self.M_ROT,1))
return prob
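    # Note added for clarity (a reading of the code above, not an annotation
    # from the original author): in log space the E-step computes
    #   log P(j|k) = sum_i K_ik * log W_ij - sum_i W_ij + const,
    # i.e. a Poisson log-likelihood of photon counts K under the rotated
    # reference W, normalized over rotations j for every frame k.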
def EM(self,row_n=200,col_n=2):
P = self.cond_prob()
for i in range(self.M_DATA):
ind = np.argpartition(P[:,i], self.M_ROT-col_n)
ind1 = ind[:self.M_ROT-col_n]
ind2 = ind[-col_n:]
P[:,i][ind1] = 1e-80
for i in range(self.M_ROT):
ind = np.argpartition(P[i,:], self.M_DATA-row_n)
ind1 = ind[:self.M_DATA-row_n]
ind2 = ind[-row_n:]
P[i,:][ind1] = 1e-80
# test P
#c = P != 1e-80
#a = np.where(np.sum(c,axis=0) == col_n)
#i = a[0][0]
'''
ind = np.argpartition(P[0,:], self.M_DATA-row_n)
ind1 = ind[:self.M_DATA-row_n]
ind2 = ind[-row_n:]
p = np.sort(P[0,:][ind2])
x = [i for i in range(len(p))]
plt.plot(x,p)
plt.show()
'''
w = np.max(P,1)
maxw = np.max(w)
minw = np.min(w)
delta = 1e-50
if maxw - minw < delta:
self.weight = np.ones(w.shape)
else:
self.weight = (w - np.min(w)) / (np.max(w) - np.min(w)) # 1*M_ROT array, weight for compression
# j-th element represents weight for j-th intensity
new_refer = self.exp_data*csc_matrix(P.T)
new_refer = new_refer.toarray()
weight = np.tile( np.max(P,1), (self.M_PIX, 1))
new_refer *= weight
S = np.sum(P,1)
new_refer /= np.tile(S, (self.M_PIX,1))
self.reference = new_refer
def compress(self):
self.prev_model = self.model
new_refer = self.reference * np.tile(self.weight, (self.M_PIX,1))
r_img = np.reshape(new_refer,(self.WIDTH,self.WIDTH,self.M_ROT))
model = np.zeros((self.WIDTH,self.WIDTH))
for j in range(self.M_ROT):
re_img = self.rotateImg(r_img[:,:,j], -self.rotation[j])
model += re_img
model = model / self.M_ROT
self.model = model
def run(self,iterations):
log_file = open('diff.log','w')
for it in range(iterations):
print ("Iteration ",it+1)
self.expand()
self.EM()
self.compress()
# bluring the model
k = np.ones((3,3)) / 9
self.model = convolve(self.model,k)
self.save_model(it+1)
# calculate the difference
diff = np.mean(np.abs(self.model - self.prev_model))
print (diff, file=log_file)
if it==iterations-1:
self.show(1,1)
print ('Done.')
def runModel(self,model_file,iterations,curr_it=1):
self.model = np.load(model_file)
log_file = open('diff.log','a')
for it in range(iterations):
print("Iteration ",it+curr_it)
self.expand()
self.EM()
self.compress()
# bluring the model
k = np.ones((3,3)) / 9
self.model = convolve(self.model,k)
self.save_model(it+curr_it)
# calculate the difference
diff = np.mean(np.abs(self.model - self.prev_model))
print (diff, file=log_file)
if it==iterations-1:
self.show(1,1)
print ('Done.')
def show(self,total,subplot):
plt.subplot(1,total,subplot)
model = self.unnormalizeImgArray(self.model)
img_plot = plt.imshow(np.abs(model), cmap=cm.Greys_r)
img_plot.set_clim(0.0, np.max(model))
if subplot == total:
plt.show()
def save_model(self,n):
np.save('model_' + str(n),self.model)
def showProgress(total,current,char='#',length=75):
import sys
progress = int(current / total * 100)
n = int(current / total * length)
s = str(progress) + "% " + char * n
f = sys.stdout
f.write(s)
f.flush()
f.write('\r')
if total == current:
f.write('\n')
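# Illustrative use of showProgress, a simple carriage-return progress bar:
#   for i in range(1, 101):
#       showProgress(100, i)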
def save_sparse_csc(filename,array):
np.savez(filename,data = array.data, indices = array.indices, indptr = array.indptr, shape = array.shape)
def load_sparse_csc(filename):
loader = np.load(filename)
return csc_matrix((loader['data'], loader['indices'], loader['indptr']), shape = loader['shape'])
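# save_sparse_csc/load_sparse_csc round-trip a scipy CSC matrix through a
# single .npz file. Note np.savez appends the extension, so a matrix saved
# with save_sparse_csc('exp_data', m) is read back via
# load_sparse_csc('exp_data.npz').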
def convertData(dat_file,detector_file):
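# Parse a photon-event .dat file and its detector-geometry file into a
# sparse (181*181 x frames) binary matrix, save it with save_sparse_csc,
# then plot the summed detector image as a quick sanity check.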
dat = open(dat_file)
detector = open(detector_file)
i = iter(dat)
frames = int(i.readline())
next(i)
next(i) # skip 2 lines
print ('Total frames:', frames)
ii = iter(detector)
pixels = int(ii.readline())
print ('Pixels in the detector:',pixels)
det_dict = {} # store the coordinates in a dictionary
for j in range(pixels):
coo = ii.readline().split('\t')
coo = (90 - int(coo[1])) * 181 + (int(coo[0]) + 90)
det_dict[j] = coo
exp_data = lil_matrix((181*181,frames),dtype='uint8')
for frame in range(frames):
locations = [int(n) for n in i.readline().split(' ') if n != '\n']
for n in locations:
exp_data[det_dict[n],frame] = 1
try:
next(i)
next(i)
next(i)
next(i) # skip 4 lines
except StopIteration: # input exhausted: last frame reached
break
exp_data = exp_data.tocsc()
save_sparse_csc('exp_data',exp_data)
# test plotting
img = exp_data.sum(axis=1).reshape((181,181))
img_plot = plt.imshow(img, cmap = cm.Greys_r)
img_plot.set_clim(0,np.max(img))
plt.show()
if __name__ == '__main__':
emc1 = EMC2D(samples=450000)
#emc1.run(50)
emc1.runModel('model_50.npy',50,51) |
archen/django | refs/heads/master | django/db/models/sql/compiler.py | 6 | import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.utils import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (CURSOR, SINGLE, MULTI, NO_RESULTS,
ORDER_DIR, GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
# TODO: after the query has been executed, the altered state should be
# cleaned. We are not using a clone() of the query here.
"""
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def __call__(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
return self(name)
def compile(self, node):
vendor_impl = getattr(
node, 'as_' + self.connection.vendor, None)
if vendor_impl:
return vendor_impl(self, self.connection)
else:
return node.as_sql(self, self.connection)
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
having_group_by = self.query.having.get_group_by_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
params.extend(o_params)
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = self.compile(col)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = self.compile(aggregate)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.column
for seen_model, seen_alias in seen_models.items():
if seen_model and seen_alias == alias:
ancestor_link = seen_model._meta.get_ancestor_link(model)
if ancestor_link:
column = ancestor_link.column
break
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field))
aliases.add(alias)
continue
if with_aliases and column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
# For plain DISTINCT queries any ORDER BY clause must appear
# in SELECT clause.
# http://www.postgresql.org/message-id/27009.1171559417@sss.pgh.pa.us
must_append_to_select = distinct and not self.query.distinct_fields
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not must_append_to_select or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif not self.query._extra or get_order_dir(field)[0] not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if must_append_to_select and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
if must_append_to_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
result.append('%s %s' % (elt, order))
else:
result.append("(%s) %s" % (self.query.extra[col][0], order))
params.extend(self.query.extra[col][1])
else:
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model.
if field.rel and path and opts.ordering:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(alias, [t.column for t in targets], order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = self.compile(extra_cond)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
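# When every concrete field is selected and the backend allows grouping
# by the primary key alone (e.g. MySQL), collapse the GROUP BY to the pk.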
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = self.compile(col)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or order_params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns, _ = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo((col[0], col[1].column), col[1]) for col in columns)
if restricted:
next = requested.get(f.name, {})
else:
next = False
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, _ = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo((col[0], col[1].column), col[1]) for col in columns)
next = requested.get(f.related_query_name(), {})
self.fill_related_selections(model._meta, alias, cur_depth + 1,
next, restricted)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
if resolve_columns:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
elif self.query.default_cols:
fields = self.query.get_meta().concrete_fields
else:
fields = []
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
fields = [f for f in fields if f.model._meta.db_table not in only_load or
f.column in only_load[f.model._meta.db_table]]
if has_aggregate_select:
# pad None in to fields for aggregates
fields = fields[:aggregate_start] + [
None for x in range(0, aggregate_end - aggregate_start)
] + fields[aggregate_start:]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
row = tuple(row[:aggregate_start]) + tuple(
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
) + tuple(row[aggregate_end:])
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = cursor_iter(cursor,
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
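# Used when this query is the right-hand side of an __in filter: a single
# column compiles to "alias.col IN (subquery)", while multi-column matches
# are rewritten as a correlated EXISTS condition.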
def as_subquery_condition(self, alias, columns, qn):
inner_qn = self
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (inner_qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behaviour for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'prepare_database_save'):
val = val.prepare_database_save(field)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self.connection)
else:
placeholder = '%s'
if hasattr(val, 'evaluate'):
val = SQLEvaluator(val, self.query, allow_joins=False)
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self
sql, params = [], []
for aggregate in self.query.aggregate_select.values():
agg_sql, agg_params = self.compile(aggregate)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateField
fields = [DateField()]
else:
from django.db.backends.utils import typecast_date
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_date(str(date))
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.utils import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = row[offset]
if resolve_columns:
datetime = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
datetime = typecast_timestamp(str(datetime))
# Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.datetimes(). Are time zone "
"definitions for your database and pytz installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def cursor_iter(cursor, sentinel):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield rows
finally:
cursor.close()
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
finally:
cursor.close()
|
0x7E/ubuntu-tweak | refs/heads/master | ubuntutweak/utils/parser.py | 4 | import os
import json
import urllib
from ubuntutweak.common import consts
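# Parser indexes a Django-style JSON fixture (a list of records carrying
# "pk" and "fields" keys) by the given field name; is_available flags
# whether the file could be read and parsed.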
class Parser(dict):
def __init__(self, file, key):
try:
self.__data = json.loads(open(file).read())
self.init_items(key)
except Exception: # missing file or malformed JSON
self.is_available = False
else:
self.is_available = True
def get_data(self):
return self.__data
def init_items(self, key):
for item in self.__data:
item['fields']['id'] = item['pk']
self[item['fields'][key]] = item['fields']
def get_by_lang(self, key, field):
value = self[key][field]
if consts.LANG in value.keys():
return value[consts.LANG]
else:
return value['raw']
|
vadimtk/chrome4sdp | refs/heads/master | tools/traceline/traceline/scripts/split.py | 186 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Splits a single json file (read from stdin) into separate files of 40k
records, named split.X.
"""
import sys
def main():
filecount = 0
count = 0
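# No "parseEvents([" header is written for split.0; the input stream
# presumably already starts with it, so only later chunks re-add it.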
f = open('split.0', 'wb')
for l in sys.stdin:
if l == "},\r\n":
count += 1
if count == 40000:
f.write("}]);\r\n")
count = 0
filecount += 1
f = open('split.%d' % filecount, 'wb')
f.write("parseEvents([\r\n")
continue
f.write(l)
if __name__ == '__main__':
main()
|
lmazuel/azure-sdk-for-python | refs/heads/master | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/network_interface_ip_configuration_py3.py | 1 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class NetworkInterfaceIPConfiguration(SubResource):
"""IPConfiguration in a network interface.
:param id: Resource ID.
:type id: str
:param application_gateway_backend_address_pools: The reference of
ApplicationGatewayBackendAddressPool resource.
:type application_gateway_backend_address_pools:
list[~azure.mgmt.network.v2017_08_01.models.ApplicationGatewayBackendAddressPool]
:param load_balancer_backend_address_pools: The reference of
LoadBalancerBackendAddressPool resource.
:type load_balancer_backend_address_pools:
list[~azure.mgmt.network.v2017_08_01.models.BackendAddressPool]
:param load_balancer_inbound_nat_rules: A list of references of
LoadBalancerInboundNatRules.
:type load_balancer_inbound_nat_rules:
list[~azure.mgmt.network.v2017_08_01.models.InboundNatRule]
:param private_ip_address: Private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: Defines how a private IP address is
assigned. Possible values are: 'Static' and 'Dynamic'. Possible values
include: 'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2017_08_01.models.IPAllocationMethod
:param private_ip_address_version: Available from Api-Version 2016-03-30
onwards, it represents whether the specific ipconfiguration is IPv4 or
IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'.
Possible values include: 'IPv4', 'IPv6'
:type private_ip_address_version: str or
~azure.mgmt.network.v2017_08_01.models.IPVersion
:param subnet: Subnet bound to the IP configuration.
:type subnet: ~azure.mgmt.network.v2017_08_01.models.Subnet
:param primary: Gets whether this is a primary customer address on the
network interface.
:type primary: bool
:param public_ip_address: Public IP address bound to the IP configuration.
:type public_ip_address:
~azure.mgmt.network.v2017_08_01.models.PublicIPAddress
:param provisioning_state: The provisioning state of the network interface
IP configuration. Possible values are: 'Updating', 'Deleting', and
'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[ApplicationGatewayBackendAddressPool]'},
'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[BackendAddressPool]'},
'load_balancer_inbound_nat_rules': {'key': 'properties.loadBalancerInboundNatRules', 'type': '[InboundNatRule]'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'private_ip_address_version': {'key': 'properties.privateIPAddressVersion', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'primary': {'key': 'properties.primary', 'type': 'bool'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, application_gateway_backend_address_pools=None, load_balancer_backend_address_pools=None, load_balancer_inbound_nat_rules=None, private_ip_address: str=None, private_ip_allocation_method=None, private_ip_address_version=None, subnet=None, primary: bool=None, public_ip_address=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(NetworkInterfaceIPConfiguration, self).__init__(id=id, **kwargs)
self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
self.load_balancer_inbound_nat_rules = load_balancer_inbound_nat_rules
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.private_ip_address_version = private_ip_address_version
self.subnet = subnet
self.primary = primary
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
|
BIT-SYS/gem5-spm-module | refs/heads/master | ext/ply/example/BASIC/basiclex.py | 166 | # An implementation of Dartmouth BASIC (1964)
from ply import *
keywords = (
'LET','READ','DATA','PRINT','GOTO','IF','THEN','FOR','NEXT','TO','STEP',
'END','STOP','DEF','GOSUB','DIM','REM','RETURN','RUN','LIST','NEW',
)
tokens = keywords + (
'EQUALS','PLUS','MINUS','TIMES','DIVIDE','POWER',
'LPAREN','RPAREN','LT','LE','GT','GE','NE',
'COMMA','SEMI', 'INTEGER','FLOAT', 'STRING',
'ID','NEWLINE'
)
t_ignore = ' \t'
def t_REM(t):
r'REM .*'
return t
def t_ID(t):
r'[A-Z][A-Z0-9]*'
if t.value in keywords:
t.type = t.value
return t
t_EQUALS = r'='
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_POWER = r'\^'
t_DIVIDE = r'/'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LT = r'<'
t_LE = r'<='
t_GT = r'>'
t_GE = r'>='
t_NE = r'<>'
t_COMMA = r'\,'
t_SEMI = r';'
t_INTEGER = r'\d+'
t_FLOAT = r'((\d*\.\d+)(E[\+-]?\d+)?|([1-9]\d*E[\+-]?\d+))'
t_STRING = r'\".*?\"'
def t_NEWLINE(t):
r'\n'
t.lexer.lineno += 1
return t
def t_error(t):
print("Illegal character %s" % t.value[0])
t.lexer.skip(1)
lex.lex(debug=0)
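# Illustrative tokenizing sketch using ply's module-level helpers, which
# operate on the lexer built above:
#   lex.input('10 LET A1 = 2 + 3')
#   while True:
#       tok = lex.token()
#       if not tok:
#           break
#       print(tok.type, tok.value)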
|
AIML/scikit-learn | refs/heads/master | sklearn/utils/metaestimators.py | 283 | """Utilities for meta-estimators"""
# Author: Joel Nothman
# Andreas Mueller
# Licence: BSD
from operator import attrgetter
from functools import update_wrapper
__all__ = ['if_delegate_has_method']
class _IffHasAttrDescriptor(object):
"""Implements a conditional property using the descriptor protocol.
Using this class to create a decorator will raise an ``AttributeError``
if the ``attribute_name`` is not present on the base object.
This allows ducktyping of the decorated method based on ``attribute_name``.
See https://docs.python.org/3/howto/descriptor.html for an explanation of
descriptors.
"""
def __init__(self, fn, attribute_name):
self.fn = fn
self.get_attribute = attrgetter(attribute_name)
# update the docstring of the descriptor
update_wrapper(self, fn)
def __get__(self, obj, type=None):
# raise an AttributeError if the attribute is not present on the object
if obj is not None:
# delegate only on instances, not the classes.
# this is to allow access to the docstrings.
self.get_attribute(obj)
# lambda, but not partial, allows help() to work with update_wrapper
out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
# update the docstring of the returned function
update_wrapper(out, self.fn)
return out
def if_delegate_has_method(delegate):
"""Create a decorator for methods that are delegated to a sub-estimator
This enables ducktyping by hasattr returning True according to the
sub-estimator.
>>> from sklearn.utils.metaestimators import if_delegate_has_method
>>>
>>>
>>> class MetaEst(object):
... def __init__(self, sub_est):
... self.sub_est = sub_est
...
... @if_delegate_has_method(delegate='sub_est')
... def predict(self, X):
... return self.sub_est.predict(X)
...
>>> class HasPredict(object):
... def predict(self, X):
... return X.sum(axis=1)
...
>>> class HasNoPredict(object):
... pass
...
>>> hasattr(MetaEst(HasPredict()), 'predict')
True
>>> hasattr(MetaEst(HasNoPredict()), 'predict')
False
"""
return lambda fn: _IffHasAttrDescriptor(fn, '%s.%s' % (delegate, fn.__name__))
|
OAButton/odb | refs/heads/master | portality/view/media.py | 2 | '''
An auth-controlled access and retrieval mechanism for a media folder
'''
import json, os
from flask import Blueprint, request, url_for, flash, redirect, abort, make_response
from flask import render_template
from flask.ext.login import current_user
import werkzeug
from portality.core import app
import portality.util as util
import portality.models as models
blueprint = Blueprint('media', __name__)
mediadir = os.path.dirname(os.path.abspath(__file__)).replace('/portality/view','/') + app.config['MEDIA_FOLDER']
if not os.path.exists(mediadir):
os.makedirs(mediadir)
@blueprint.route('.json')
@blueprint.route('/')
def media():
listing = os.listdir( mediadir )
listing = sorted(listing, key=str.lower)
if util.request_wants_json():
response = make_response(json.dumps(listing))
response.headers["Content-type"] = "application/json"
return response
else:
usedin = {}
for f in listing:
# see if it is used in any records
#try:
r = models.Pages().query(q='*' + f + '*')
usedin[f] = [i['_source']['url'] for i in r.get('hits',{}).get('hits',[])]
#except:
# usedin[f] = []
return render_template('media/media.html', files=listing, usedin=usedin)
@blueprint.route('/<path:path>', methods=['GET','POST','DELETE'])
def medias(path=''):
if request.method == 'GET':
# NOTE: this is only an alternative for when running in debug mode - it delivers images from media folder successfully
# otherwise you should set your web server (nginx, apache, whatever) to serve GETs on /media/.*
loc = mediadir + '/' + path
if os.path.isfile(loc):
response = make_response(open(loc).read())
#response.headers["Content-type"] = "image"
return response
else:
abort(404)
elif ( ( request.method == 'DELETE' or ( request.method == 'POST' and request.form.get('submit',False) == 'Delete' ) ) and current_user.is_super ):
try:
loc = mediadir + '/' + path
if os.path.isfile(loc):
os.remove(loc)
except:
pass
return ''
elif request.method == 'POST' and current_user.is_super:
# TODO: check the file type meets the allowed ones, and if so put it in media dir
filename = werkzeug.secure_filename(path)
out = open(mediadir + '/' + filename, 'w')
out.write(request.data)
out.close()
return ''
else:
abort(401)
|
evalsocket/tensorflow | refs/heads/master | object_detection/object_detection/builders/post_processing_builder_test.py | 21 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for post_processing_builder."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import post_processing_builder
from object_detection.protos import post_processing_pb2
class PostProcessingBuilderTest(tf.test.TestCase):
def test_build_non_max_suppressor_with_correct_parameters(self):
post_processing_text_proto = """
batch_non_max_suppression {
score_threshold: 0.7
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
non_max_suppressor, _ = post_processing_builder.build(
post_processing_config)
self.assertEqual(non_max_suppressor.keywords['max_size_per_class'], 100)
self.assertEqual(non_max_suppressor.keywords['max_total_size'], 300)
self.assertAlmostEqual(non_max_suppressor.keywords['score_thresh'], 0.7)
self.assertAlmostEqual(non_max_suppressor.keywords['iou_thresh'], 0.6)
def test_build_identity_score_converter(self):
post_processing_text_proto = """
score_converter: IDENTITY
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter, tf.identity)
def test_build_sigmoid_score_converter(self):
post_processing_text_proto = """
score_converter: SIGMOID
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter, tf.sigmoid)
def test_build_softmax_score_converter(self):
post_processing_text_proto = """
score_converter: SOFTMAX
"""
post_processing_config = post_processing_pb2.PostProcessing()
text_format.Merge(post_processing_text_proto, post_processing_config)
_, score_converter = post_processing_builder.build(post_processing_config)
self.assertEqual(score_converter, tf.nn.softmax)
if __name__ == '__main__':
tf.test.main()
|
mansonul/events | refs/heads/master | events/contrib/plugins/form_elements/fields/null_boolean/apps.py | 1 | __title__ = 'fobi.contrib.plugins.form_elements.fields.null_boolean.apps'
__author__ = 'Artur Barseghyan <artur.barseghyan@gmail.com>'
__copyright__ = '2014-2017 Artur Barseghyan'
__license__ = 'GPL 2.0/LGPL 2.1'
__all__ = ('Config',)
try:
from django.apps import AppConfig
class Config(AppConfig):
"""Config."""
name = 'fobi.contrib.plugins.form_elements.fields.null_boolean'
label = 'fobi_contrib_plugins_form_elements_fields_null_boolean'
except ImportError:
pass
|
bneg/Empire | refs/heads/master | lib/modules/powershell/situational_awareness/network/powerview/get_group_member.py | 3 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-DomainGroupMember',
'Author': ['@harmj0y'],
'Description': ('Returns the members of a given group, with the option to "Recurse" to find all effective group members. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Identity' : {
'Description' : 'A SamAccountName, DistinguishedName, SID, GUID, or a dns host name, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'The domain to use for the query, defaults to the current domain.',
'Required' : False,
'Value' : ''
},
'Recurse' : {
'Description' : 'Switch. If the group member is a group, recursively try to query its members as well.',
'Required' : False,
'Value' : ''
},
'RecurseUsingMatchingRule' : {
'Description' : 'Switch. Use LDAP_MATCHING_RULE_IN_CHAIN in the LDAP search query when -Recurse is specified.',
'Required' : False,
'Value' : ''
},
'LDAPFilter' : {
'Description' : 'Specifies an LDAP query string that is used to filter Active Directory objects.',
'Required' : False,
'Value' : ''
},
'SearchBase' : {
'Description' : 'The LDAP source to search through, e.g. "LDAP://OU=secret,DC=testlab,DC=local" Useful for OU queries.',
'Required' : False,
'Value' : ''
},
'Server' : {
'Description' : 'Specifies an active directory server (domain controller) to bind to',
'Required' : False,
'Value' : ''
},
'SearchScope' : {
                'Description' : 'Specifies the scope to search under, Base/OneLevel/Subtree (default of Subtree).',
'Required' : False,
'Value' : ''
},
'ResultPageSize' : {
'Description' : 'Specifies the PageSize to set for the LDAP searcher object.',
'Required' : False,
'Value' : ''
},
'ServerTimeLimit' : {
'Description' : 'Specifies the maximum amount of time the server spends searching. Default of 120 seconds.',
'Required' : False,
'Value' : ''
},
'SecurityMasks' : {
'Description' : 'Specifies an option for examining security information of a directory object. One of "Dacl", "Group", "None", "Owner", "Sacl".',
'Required' : False,
'Value' : ''
},
'Tombstone' : {
'Description' : 'Switch. Specifies that the search should also return deleted/tombstoned objects.',
'Required' : False,
                'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
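# Illustrative result of the option loop above (hypothetical option values):
# with Identity set to 'testgroup' and Recurse set to 'True', the command
# line appended to the PowerView source would be
#     Get-DomainGroupMember -Identity testgroup -Recurse
# 'true' values become bare switches, other values are appended unquoted,
# and option order follows dict iteration so it may vary.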
|
Arcanemagus/SickRage | refs/heads/master | lib/pbr/hooks/commands.py | 24 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools.command import easy_install
from pbr.hooks import base
from pbr import options
from pbr import packaging
class CommandsConfig(base.BaseConfig):
section = 'global'
def __init__(self, config):
super(CommandsConfig, self).__init__(config)
self.commands = self.config.get('commands', "")
def save(self):
self.config['commands'] = self.commands
super(CommandsConfig, self).save()
def add_command(self, command):
self.commands = "%s\n%s" % (self.commands, command)
def hook(self):
self.add_command('pbr.packaging.LocalEggInfo')
self.add_command('pbr.packaging.LocalSDist')
self.add_command('pbr.packaging.LocalInstallScripts')
self.add_command('pbr.packaging.LocalDevelop')
self.add_command('pbr.packaging.LocalRPMVersion')
self.add_command('pbr.packaging.LocalDebVersion')
if os.name != 'nt':
easy_install.get_script_args = packaging.override_get_script_args
if packaging.have_sphinx():
self.add_command('pbr.builddoc.LocalBuildDoc')
if os.path.exists('.testr.conf') and packaging.have_testr():
# There is a .testr.conf file. We want to use it.
self.add_command('pbr.packaging.TestrTest')
elif self.config.get('nosetests', False) and packaging.have_nose():
# We seem to still have nose configured
self.add_command('pbr.packaging.NoseTest')
use_egg = options.get_boolean_option(
self.pbr_config, 'use-egg', 'PBR_USE_EGG')
# We always want non-egg install unless explicitly requested
if 'manpages' in self.pbr_config or not use_egg:
self.add_command('pbr.packaging.LocalInstall')
else:
self.add_command('pbr.packaging.InstallWithGit')
|
tbinjiayou/Odoo | refs/heads/master | openerp/modules/migration.py | 279 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Modules migration handling. """
import imp
import logging
import os
from os.path import join as opj
import openerp
import openerp.release as release
import openerp.tools as tools
from openerp.tools.parse_version import parse_version
_logger = logging.getLogger(__name__)
class MigrationManager(object):
"""
    This class manages the migration of modules.
    Migration files must be Python files containing a "migrate(cr, installed_version)" function.
    These files must follow a directory tree structure: a 'migrations' folder which contains a
    folder per version. A version can be a 'module' version or a 'server.module' version (in the
    latter case, the files will only be processed by that version of the server). Python file
    names must start with 'pre' or 'post' and will be executed, respectively, before and after
    the module initialisation.
Example:
<moduledir>
`-- migrations
|-- 1.0
| |-- pre-update_table_x.py
| |-- pre-update_table_y.py
| |-- post-clean-data.py
| `-- README.txt # not processed
|-- 5.0.1.1 # files in this folder will be executed only on a 5.0 server
| |-- pre-delete_table_z.py
| `-- post-clean-data.py
`-- foo.py # not processed
    A similar structure is generated by the maintenance module with the migration files obtained
    through the maintenance contract.
"""
def __init__(self, cr, graph):
self.cr = cr
self.graph = graph
self.migrations = {}
self._get_files()
def _get_files(self):
"""
import addons.base.maintenance.utils as maintenance_utils
maintenance_utils.update_migrations_files(self.cr)
#"""
for pkg in self.graph:
self.migrations[pkg.name] = {}
if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade'):
continue
get_module_filetree = openerp.modules.module.get_module_filetree
self.migrations[pkg.name]['module'] = get_module_filetree(pkg.name, 'migrations') or {}
self.migrations[pkg.name]['maintenance'] = get_module_filetree('base', 'maintenance/migrations/' + pkg.name) or {}
def migrate_module(self, pkg, stage):
assert stage in ('pre', 'post')
stageformat = {
'pre': '[>%s]',
'post': '[%s>]',
}
if not (hasattr(pkg, 'update') or pkg.state == 'to upgrade') or pkg.state == 'to install':
return
def convert_version(version):
if version.count('.') >= 2:
                return version  # the version number already contains the server version
return "%s.%s" % (release.major_version, version)
def _get_migration_versions(pkg):
def __get_dir(tree):
return [d for d in tree if tree[d] is not None]
versions = list(set(
__get_dir(self.migrations[pkg.name]['module']) +
__get_dir(self.migrations[pkg.name]['maintenance'])
))
versions.sort(key=lambda k: parse_version(convert_version(k)))
return versions
def _get_migration_files(pkg, version, stage):
""" return a list of tuple (module, file)
"""
m = self.migrations[pkg.name]
lst = []
mapping = {
'module': opj(pkg.name, 'migrations'),
'maintenance': opj('base', 'maintenance', 'migrations', pkg.name),
}
for x in mapping.keys():
if version in m[x]:
for f in m[x][version]:
if m[x][version][f] is not None:
continue
if not f.startswith(stage + '-'):
continue
lst.append(opj(mapping[x], version, f))
lst.sort()
return lst
parsed_installed_version = parse_version(pkg.installed_version or '')
current_version = parse_version(convert_version(pkg.data['version']))
versions = _get_migration_versions(pkg)
for version in versions:
if parsed_installed_version < parse_version(convert_version(version)) <= current_version:
strfmt = {'addon': pkg.name,
'stage': stage,
'version': stageformat[stage] % version,
}
for pyfile in _get_migration_files(pkg, version, stage):
name, ext = os.path.splitext(os.path.basename(pyfile))
if ext.lower() != '.py':
continue
mod = fp = fp2 = None
try:
fp, fname = tools.file_open(pyfile, pathinfo=True)
if not isinstance(fp, file):
# imp.load_source need a real file object, so we create
# one from the file-like object we get from file_open
fp2 = os.tmpfile()
fp2.write(fp.read())
fp2.seek(0)
try:
mod = imp.load_source(name, fname, fp2 or fp)
_logger.info('module %(addon)s: Running migration %(version)s %(name)s' % dict(strfmt, name=mod.__name__))
migrate = mod.migrate
except ImportError:
_logger.exception('module %(addon)s: Unable to load %(stage)s-migration file %(file)s' % dict(strfmt, file=pyfile))
raise
except AttributeError:
_logger.error('module %(addon)s: Each %(stage)s-migration file must have a "migrate(cr, installed_version)" function' % strfmt)
else:
migrate(self.cr, pkg.installed_version)
finally:
if fp:
fp.close()
if fp2:
fp2.close()
if mod:
del mod
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
stefanklug/mapnik | refs/heads/master | scons/scons-local-2.3.6/SCons/Tool/sunlink.py | 4 | """SCons.Tool.sunlink
Tool-specific initialization for the Sun Solaris (Forte) linker.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunlink.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
import os
import os.path
import SCons.Util
import link
ccLinker = None
# search for the acc compiler and linker front end
try:
dirs = os.listdir('/opt')
except (IOError, OSError):
# Not being able to read the directory because it doesn't exist
# (IOError) or isn't readable (OSError) is okay.
dirs = []
for d in dirs:
linker = '/opt/' + d + '/bin/CC'
if os.path.exists(linker):
ccLinker = linker
break
def generate(env):
"""Add Builders and construction variables for Forte to an Environment."""
link.generate(env)
env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -G')
env['RPATHPREFIX'] = '-R'
env['RPATHSUFFIX'] = ''
env['_RPATH'] = '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'
def exists(env):
return ccLinker
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
michaelhowden/eden | refs/heads/master | modules/tests/run_pdf_tests.py | 19 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Needs to be run in the web2py environment
# python web2py.py -S eden -M -R applications/clean/modules/tests/run_pdf_tests.py
import os
from gluon import *
from gluon.storage import Storage
from s3.s3rest import S3Request
from gluon.contrib.pdfinvoice import PDF
WRITE_PDF = True
def header(r):
img_path = os.path.join(current.request.application,
"static", "img","sahanalarge_14.png")
html_h = TABLE(TR(TD(IMG(_src=os.path.sep+img_path)),
TD("A lovely Title"),
TD(DIV(P("Right aligned text"),P("<i>not yet implemented</i>")),_align="right")),
TR(TD("This is a test header. These details should be" +
" displayed on every page. This dull and meaningless" +
" text should stretch across the entire page," +
" and then wrap around onto a second line.",_colspan=3)
)
)
return DIV(html_h)
def footer(r):
return P("A footer that will appear at the end of each page.", _halign="center")
def body_1(r):
# Here is some hard-coded test data
name = "Mr. Administrator, Sir"
org_name = "Example"
email = "admin@example.com"
logo_path = os.path.join(current.request.application,
"static", "img","blank-user.gif")
innerTable = TABLE(TR(TH(T("Name")), TH(T("Organisation")), TH(T("Email"))),
                       TR(TD(name), TD(org_name), TD(email))
)
person_details = TABLE(TR(TD(IMG(_src=os.path.sep+logo_path)),
TD(innerTable)
))
todo = DIV(P("So here is a list of features that could be added."),
LI("Extend basic html tag support: H1, H2, etc, LI, etc"),
LI("Add support for standard attributes: align, halign, valign"),
LI("Make the header and footer fit on a landscape page if the table changes the page layout"),
LI("Be able to control the width of table cells, being more smart about this would be good, but probably harder to implement. See the above tables the cells in the heading are all the same width. The inner table overflows the outer table."),
)
# Now put all the output together
output = DIV(H1("<b><h1>Here is an example of hardcoded data, created via a callback function which returns html.</h1></b>"),
P("<i>Unfortunately, the header tags don't yet work these can be added in the s3codecs/pdf.py file</i>"),
TABLE(TR(TH(T("Volunteer Service Record")))),
person_details,
todo,
)
return output
def test_pdf1():
"""
Generate a Test PDF that will demonstrate S3RL_PDF functionality
"""
r = S3Request(prefix="pr", name="person")
T = current.T
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_title = T("PDF Test Card I"),
pdf_header = header,
pdf_callback = body_1,
pdf_footer = footer
)
def test_pdf2():
"""
Generate a Test PDF that will demonstrate S3RL_PDF functionality
"""
r = S3Request(prefix="gis", name="hierarchy")
from s3.s3export import S3Exporter
exporter = S3Exporter().pdf
return exporter(r.resource,
request = r,
method = "list",
pdf_title = T("PDF Test Card II"),
pdf_table_autogrow = "B",
pdf_header = header,
pdf_footer = footer
)
pdf = test_pdf1()
if WRITE_PDF:
filename = os.path.join("applications", current.request.application,
"modules", "tests", "test_pdf_1.pdf")
f = open(filename, 'wb')
f.write(pdf)
f.close()
pdf = test_pdf2()
if WRITE_PDF:
filename = os.path.join("applications", current.request.application,
"modules", "tests", "test_pdf_2.pdf")
f = open(filename, 'wb')
f.write(pdf)
f.close()
|
JanDintel/ansible | refs/heads/devel | test/units/plugins/vars/__init__.py | 7690 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
pancentric/django-cms | refs/heads/develop | cms/plugins/file/migrations/0002_freeze.py | 11 | # -*- coding: utf-8 -*-
from south.db import db
from django.db import models
from cms.plugins.file.models import *
class Migration:
def forwards(self, orm):
"Write your forwards migration here"
def backwards(self, orm):
"Write your backwards migration here"
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'file.file': {
'Meta': {'object_name': 'File', 'db_table': "'cmsplugin_file'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['file']
|
alistairlow/tensorflow | refs/heads/master | tensorflow/contrib/text/python/ops/skip_gram_ops.py | 76 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Skip-gram sampling ops from https://arxiv.org/abs/1301.3781."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from tensorflow.contrib import lookup
from tensorflow.contrib.text.python.ops import gen_skip_gram_ops
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import input as input_ops
_checkpoint_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_skip_gram_ops.so"))
ops.NotDifferentiable("SkipGramGenerateCandidates")
def skip_gram_sample(input_tensor,
min_skips=1,
max_skips=5,
start=0,
limit=-1,
emit_self_as_target=False,
vocab_freq_table=None,
vocab_min_count=None,
vocab_subsampling=None,
corpus_size=None,
batch_size=None,
batch_capacity=None,
seed=None,
name=None):
"""Generates skip-gram token and label paired Tensors from the input tensor.
Generates skip-gram `("token", "label")` pairs using each element in the
rank-1 `input_tensor` as a token. The window size used for each token will be
randomly selected from the range specified by `[min_skips, max_skips]`,
inclusive. See https://arxiv.org/abs/1301.3781 for more details about
skip-gram.
For example, given `input_tensor = ["the", "quick", "brown", "fox", "jumps"]`,
`min_skips = 1`, `max_skips = 2`, `emit_self_as_target = False`, the output
`(tokens, labels)` pairs for the token "quick" will be randomly selected from
either `(tokens=["quick", "quick"], labels=["the", "brown"])` for 1 skip, or
`(tokens=["quick", "quick", "quick"], labels=["the", "brown", "fox"])` for 2
skips.
If `emit_self_as_target = True`, each token will also be emitted as a label
for itself. From the previous example, the output will be either
`(tokens=["quick", "quick", "quick"], labels=["the", "quick", "brown"])` for 1
skip, or `(tokens=["quick", "quick", "quick", "quick"], labels=["the",
"quick", "brown", "fox"])` for 2 skips.
The same process is repeated for each element of `input_tensor` and
concatenated together into the two output rank-1 `Tensors` (one for all the
tokens, another for all the labels).
If `vocab_freq_table` is specified, tokens in `input_tensor` that are not
present in the vocabulary are discarded. Tokens whose frequency counts are
below `vocab_min_count` are also discarded. Tokens whose frequency proportions
in the corpus exceed `vocab_subsampling` may be randomly down-sampled. See
Eq. 5 in http://arxiv.org/abs/1310.4546 for more details about subsampling.
Due to the random window sizes used for each token, the lengths of the outputs
are non-deterministic, unless `batch_size` is specified to batch the outputs
to always return `Tensors` of length `batch_size`.
Args:
input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates.
min_skips: `int` or scalar `Tensor` specifying the minimum window size to
randomly use for each token. Must be >= 0 and <= `max_skips`. If
`min_skips` and `max_skips` are both 0, the only label outputted will be
the token itself when `emit_self_as_target = True` - or no output
otherwise.
max_skips: `int` or scalar `Tensor` specifying the maximum window size to
randomly use for each token. Must be >= 0.
start: `int` or scalar `Tensor` specifying the position in
`input_tensor` from which to start generating skip-gram candidates.
limit: `int` or scalar `Tensor` specifying the maximum number of
elements in `input_tensor` to use in generating skip-gram candidates. -1
means to use the rest of the `Tensor` after `start`.
emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit
each token as a label for itself.
vocab_freq_table: (Optional) A lookup table (subclass of
`lookup.InitializableLookupTableBase`) that maps tokens to their raw
frequency counts. If specified, any token in `input_tensor` that is not
found in `vocab_freq_table` will be filtered out before generating
skip-gram candidates. While this will typically map to integer raw
frequency counts, it could also map to float frequency proportions.
`vocab_min_count` and `corpus_size` should be in the same units as this.
vocab_min_count: (Optional) `int`, `float`, or scalar `Tensor` specifying
minimum frequency threshold (from `vocab_freq_table`) for a token to be
kept in `input_tensor`. If this is specified, `vocab_freq_table` must also
be specified - and they should both be in the same units.
vocab_subsampling: (Optional) `float` specifying frequency proportion
threshold for tokens from `input_tensor`. Tokens that occur more
frequently (based on the ratio of the token's `vocab_freq_table` value to
the `corpus_size`) will be randomly down-sampled. Reasonable starting
values may be around 1e-3 or 1e-5. If this is specified, both
`vocab_freq_table` and `corpus_size` must also be specified. See Eq. 5
in http://arxiv.org/abs/1310.4546 for more details.
corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the
total number of tokens in the corpus (e.g., sum of all the frequency
counts of `vocab_freq_table`). Used with `vocab_subsampling` for
down-sampling frequently occurring tokens. If this is specified,
`vocab_freq_table` and `vocab_subsampling` must also be specified.
batch_size: (Optional) `int` specifying batch size of returned `Tensors`.
batch_capacity: (Optional) `int` specifying batch capacity for the queue
used for batching returned `Tensors`. Only has an effect if
`batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
seed: (Optional) `int` used to create a random seed for window size and
subsampling. See `set_random_seed` docs for behavior.
name: (Optional) A `string` name or a name scope for the operations.
Returns:
A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
length `batch_size`; if `batch_size` is not specified, they will be of
random length, though they will be in sync with each other as long as they
are evaluated together.
Raises:
ValueError: If `vocab_freq_table` is not provided, but `vocab_min_count`,
`vocab_subsampling`, or `corpus_size` is specified. If `vocab_subsampling`
and `corpus_size` are not both present or both absent.
"""
if vocab_freq_table is None and (vocab_min_count is not None or
vocab_subsampling is not None or
corpus_size is not None):
raise ValueError(
"vocab_freq_table is not provided, but vocab_min_count={}, "
"vocab_subsampling={}, or corpus_size={} is not None. These settings "
"are useless without a vocab_freq_table.".format(
vocab_min_count, vocab_subsampling, corpus_size))
if (vocab_subsampling is None) != (corpus_size is None):
raise ValueError(
"vocab_subsampling is {} while corpus_size is {} - both must be "
"provided in order for subsampling to work.".format(
vocab_subsampling, corpus_size))
with ops.name_scope(
name,
"skip_gram_sample",
values=[input_tensor, min_skips, max_skips, start, limit]):
input_tensor = _filter_input(
input_tensor=input_tensor,
vocab_freq_table=vocab_freq_table,
vocab_min_count=vocab_min_count,
vocab_subsampling=vocab_subsampling,
corpus_size=corpus_size,
seed=seed)
seed1, seed2 = random_seed.get_seed(seed)
tokens, labels = gen_skip_gram_ops.skip_gram_generate_candidates(
input_tensor=input_tensor,
min_skips=min_skips,
max_skips=max_skips,
start=start,
limit=limit,
emit_self_as_target=emit_self_as_target,
# Note that seed here should be seed1! This is due to
# GuardedPhiloxRandom's hard-coded attributes of "seed" and "seed2".
seed=seed1,
seed2=seed2)
# TODO(weiho): If the need arises, add support for sparse input_tensor that
# figures out sentence boundaries, then calls
# skip_gram_generate_candidates() on each sentence.
# Batches the (tokens, labels) outputs so that they will be of deterministic
# batch_size, to facilitate feeding them into the rest of the network.
if batch_size is not None and batch_size > 0:
batch_capacity = (batch_capacity
if (batch_capacity is not None and batch_capacity > 0)
else 100 * batch_size)
return input_ops.batch(
[tokens, labels],
batch_size,
capacity=batch_capacity,
enqueue_many=True)
return tokens, labels
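# A minimal usage sketch (illustrative, not part of the original source).
# With min_skips == max_skips == 1 the window size is fixed, so the output
# below is one deterministic ordering produced by the candidate generator:
#
#     # input_tensor is any rank-1 string Tensor, e.g.
#     # ["the", "quick", "brown", "fox"]
#     tokens, labels = skip_gram_sample(input_tensor, min_skips=1, max_skips=1)
#     # tokens -> ["the", "quick", "quick", "brown", "brown", "fox"]
#     # labels -> ["quick", "the", "brown", "quick", "fox", "brown"]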
def skip_gram_sample_with_text_vocab(input_tensor,
vocab_freq_file,
vocab_token_index=0,
vocab_token_dtype=dtypes.string,
vocab_freq_index=1,
vocab_freq_dtype=dtypes.float64,
vocab_delimiter=",",
vocab_min_count=0,
vocab_subsampling=None,
corpus_size=None,
min_skips=1,
max_skips=5,
start=0,
limit=-1,
emit_self_as_target=False,
batch_size=None,
batch_capacity=None,
seed=None,
name=None):
"""Skip-gram sampling with a text vocabulary file.
Wrapper around `skip_gram_sample()` for use with a text vocabulary file. The
vocabulary file is expected to be a plain-text file, with lines of
`vocab_delimiter`-separated columns. The `vocab_token_index` column should
contain the vocabulary term, while the `vocab_freq_index` column should
contain the number of times that term occurs in the corpus. For example, with
a text vocabulary file of:
```
bonjour,fr,42
hello,en,777
hola,es,99
```
You should set `vocab_delimiter=","`, `vocab_token_index=0`, and
`vocab_freq_index=2`.
See `skip_gram_sample()` documentation for more details about the skip-gram
sampling process.
Args:
input_tensor: A rank-1 `Tensor` from which to generate skip-gram candidates.
vocab_freq_file: `string` specifying full file path to the text vocab file.
vocab_token_index: `int` specifying which column in the text vocab file
contains the tokens.
vocab_token_dtype: `DType` specifying the format of the tokens in the text
vocab file.
vocab_freq_index: `int` specifying which column in the text vocab file
contains the frequency counts of the tokens.
vocab_freq_dtype: `DType` specifying the format of the frequency counts in
the text vocab file.
vocab_delimiter: `string` specifying the delimiter used in the text vocab
file.
vocab_min_count: `int`, `float`, or scalar `Tensor` specifying
minimum frequency threshold (from `vocab_freq_file`) for a token to be
kept in `input_tensor`. This should correspond with `vocab_freq_dtype`.
vocab_subsampling: (Optional) `float` specifying frequency proportion
threshold for tokens from `input_tensor`. Tokens that occur more
frequently will be randomly down-sampled. Reasonable starting values may
be around 1e-3 or 1e-5. See Eq. 5 in http://arxiv.org/abs/1310.4546 for
more details.
corpus_size: (Optional) `int`, `float`, or scalar `Tensor` specifying the
total number of tokens in the corpus (e.g., sum of all the frequency
counts of `vocab_freq_file`). Used with `vocab_subsampling` for
down-sampling frequently occurring tokens. If this is specified,
`vocab_freq_file` and `vocab_subsampling` must also be specified.
If `corpus_size` is needed but not supplied, then it will be calculated
from `vocab_freq_file`. You might want to supply your own value if you
have already eliminated infrequent tokens from your vocabulary files
(where frequency < vocab_min_count) to save memory in the internal token
lookup table. Otherwise, the unused tokens' variables will waste memory.
The user-supplied `corpus_size` value must be greater than or equal to the
sum of all the frequency counts of `vocab_freq_file`.
min_skips: `int` or scalar `Tensor` specifying the minimum window size to
randomly use for each token. Must be >= 0 and <= `max_skips`. If
`min_skips` and `max_skips` are both 0, the only label outputted will be
the token itself.
max_skips: `int` or scalar `Tensor` specifying the maximum window size to
randomly use for each token. Must be >= 0.
start: `int` or scalar `Tensor` specifying the position in `input_tensor`
from which to start generating skip-gram candidates.
limit: `int` or scalar `Tensor` specifying the maximum number of elements in
`input_tensor` to use in generating skip-gram candidates. -1 means to use
the rest of the `Tensor` after `start`.
emit_self_as_target: `bool` or scalar `Tensor` specifying whether to emit
each token as a label for itself.
batch_size: (Optional) `int` specifying batch size of returned `Tensors`.
batch_capacity: (Optional) `int` specifying batch capacity for the queue
used for batching returned `Tensors`. Only has an effect if
`batch_size` > 0. Defaults to 100 * `batch_size` if not specified.
seed: (Optional) `int` used to create a random seed for window size and
subsampling. See
[`set_random_seed`](../../g3doc/python/constant_op.md#set_random_seed)
for behavior.
name: (Optional) A `string` name or a name scope for the operations.
Returns:
A `tuple` containing (token, label) `Tensors`. Each output `Tensor` is of
rank-1 and has the same type as `input_tensor`. The `Tensors` will be of
length `batch_size`; if `batch_size` is not specified, they will be of
random length, though they will be in sync with each other as long as they
are evaluated together.
Raises:
ValueError: If `vocab_token_index` or `vocab_freq_index` is less than 0 or
exceeds the number of columns in `vocab_freq_file`. If `vocab_token_index`
and `vocab_freq_index` are both set to the same column. If any token in
`vocab_freq_file` has a negative frequency.
"""
if vocab_token_index < 0 or vocab_freq_index < 0:
raise ValueError(
"vocab_token_index={} and vocab_freq_index={} must both be >= 0.".
format(vocab_token_index, vocab_freq_index))
if vocab_token_index == vocab_freq_index:
raise ValueError(
"vocab_token_index and vocab_freq_index should be different, but are "
"both {}.".format(vocab_token_index))
# Iterates through the vocab file and calculates the number of vocab terms as
# well as the total corpus size (by summing the frequency counts of all the
# vocab terms).
calculated_corpus_size = 0.0
vocab_size = 0
with gfile.GFile(vocab_freq_file, mode="r") as f:
reader = csv.reader(f, delimiter=vocab_delimiter)
for row in reader:
if vocab_token_index >= len(row) or vocab_freq_index >= len(row):
raise ValueError(
"Row in vocab file only has {} columns, so vocab_token_index={} or "
"vocab_freq_index={} is out of bounds. Row content: {}".format(
len(row), vocab_token_index, vocab_freq_index, row))
vocab_size += 1
freq = vocab_freq_dtype.as_numpy_dtype(row[vocab_freq_index])
if freq < 0:
raise ValueError(
"Row in vocab file has negative frequency of {}. Row content: {}".
format(freq, row))
# Note: tokens whose frequencies are below vocab_min_count will still
# contribute to the total corpus size used for vocab subsampling.
calculated_corpus_size += freq
if not corpus_size:
corpus_size = calculated_corpus_size
elif calculated_corpus_size - corpus_size > 1e-6:
raise ValueError(
"`corpus_size`={} must be greater than or equal to the sum of all the "
"frequency counts ({}) of `vocab_freq_file` ({}).".format(
corpus_size, calculated_corpus_size, vocab_freq_file))
vocab_freq_table = lookup.HashTable(
lookup.TextFileInitializer(
filename=vocab_freq_file,
key_dtype=vocab_token_dtype,
key_index=vocab_token_index,
value_dtype=vocab_freq_dtype,
value_index=vocab_freq_index,
vocab_size=vocab_size,
delimiter=vocab_delimiter),
# For vocab terms not in vocab file, use a default value of -1.
default_value=-1)
return skip_gram_sample(
input_tensor,
min_skips=min_skips,
max_skips=max_skips,
start=start,
limit=limit,
emit_self_as_target=emit_self_as_target,
vocab_freq_table=vocab_freq_table,
vocab_min_count=vocab_min_count,
vocab_subsampling=vocab_subsampling,
# corpus_size is not used unless vocab_subsampling is specified.
corpus_size=None if vocab_subsampling is None else corpus_size,
batch_size=batch_size,
batch_capacity=batch_capacity,
seed=seed,
name=name)
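# Hypothetical usage sketch for the text vocab file shown in the docstring
# above (token in column 0, frequency count in column 2; the file path and
# threshold are assumptions):
#
#     tokens, labels = skip_gram_sample_with_text_vocab(
#         input_tensor=input_tensor,
#         vocab_freq_file="/tmp/vocab.csv",
#         vocab_token_index=0,
#         vocab_freq_index=2,
#         vocab_min_count=50)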
def _filter_input(input_tensor, vocab_freq_table, vocab_min_count,
vocab_subsampling, corpus_size, seed):
"""Filters input tensor based on vocab freq, threshold, and subsampling."""
if vocab_freq_table is None:
return input_tensor
if not isinstance(vocab_freq_table, lookup.InitializableLookupTableBase):
raise ValueError(
"vocab_freq_table must be a subclass of "
"InitializableLookupTableBase (such as HashTable) instead of type "
"{}.".format(type(vocab_freq_table)))
with ops.name_scope(
"filter_vocab", values=[vocab_freq_table, input_tensor, vocab_min_count]):
freq = vocab_freq_table.lookup(input_tensor)
# Filters out elements in input_tensor that are not found in
# vocab_freq_table (table returns a default value of -1 specified above when
# an element is not found).
mask = math_ops.not_equal(freq, vocab_freq_table.default_value)
# Filters out elements whose vocab frequencies are less than the threshold.
if vocab_min_count is not None:
cast_threshold = math_ops.cast(vocab_min_count, freq.dtype)
mask = math_ops.logical_and(mask,
math_ops.greater_equal(freq, cast_threshold))
input_tensor = array_ops.boolean_mask(input_tensor, mask)
freq = array_ops.boolean_mask(freq, mask)
if not vocab_subsampling:
return input_tensor
if vocab_subsampling < 0 or vocab_subsampling > 1:
raise ValueError(
"Invalid vocab_subsampling={} - it should be within range [0, 1].".
format(vocab_subsampling))
# Subsamples the input tokens based on vocabulary frequency and
  # vocab_subsampling threshold (i.e., randomly discard commonly appearing
# tokens).
with ops.name_scope(
"subsample_vocab", values=[input_tensor, freq, vocab_subsampling]):
corpus_size = math_ops.cast(corpus_size, dtypes.float64)
freq = math_ops.cast(freq, dtypes.float64)
vocab_subsampling = math_ops.cast(vocab_subsampling, dtypes.float64)
    # From tensorflow_models/tutorials/embedding/word2vec_kernels.cc, which is
    # supposed to correlate with Eq. 5 in http://arxiv.org/abs/1310.4546.
keep_prob = ((math_ops.sqrt(freq /
(vocab_subsampling * corpus_size)) + 1.0) *
(vocab_subsampling * corpus_size / freq))
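    # Written out (illustrative notation), with t = vocab_subsampling and
    # N = corpus_size, the keep probability of a token w with frequency f(w) is
    #     P(keep w) = (sqrt(f(w) / (t * N)) + 1) * (t * N) / f(w)
    # so tokens whose corpus proportion f(w) / N is well above t get a keep
    # probability below 1 and are randomly dropped by the mask below.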
random_prob = random_ops.random_uniform(
array_ops.shape(freq),
minval=0,
maxval=1,
dtype=dtypes.float64,
seed=seed)
mask = math_ops.less_equal(random_prob, keep_prob)
return array_ops.boolean_mask(input_tensor, mask)
|
FHannes/intellij-community | refs/heads/master | plugins/hg4idea/testData/bin/mercurial/dispatch.py | 91 | # dispatch.py - command dispatching for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import os, sys, atexit, signal, pdb, socket, errno, shlex, time, traceback, re
import util, commands, hg, fancyopts, extensions, hook, error
import cmdutil, encoding
import ui as uimod
class request(object):
def __init__(self, args, ui=None, repo=None, fin=None, fout=None,
ferr=None):
self.args = args
self.ui = ui
self.repo = repo
# input/output/error streams
self.fin = fin
self.fout = fout
self.ferr = ferr
def run():
"run the command in sys.argv"
sys.exit((dispatch(request(sys.argv[1:])) or 0) & 255)
def dispatch(req):
"run the command specified in req.args"
if req.ferr:
ferr = req.ferr
elif req.ui:
ferr = req.ui.ferr
else:
ferr = sys.stderr
try:
if not req.ui:
req.ui = uimod.ui()
if '--traceback' in req.args:
req.ui.setconfig('ui', 'traceback', 'on')
# set ui streams from the request
if req.fin:
req.ui.fin = req.fin
if req.fout:
req.ui.fout = req.fout
if req.ferr:
req.ui.ferr = req.ferr
except util.Abort, inst:
ferr.write(_("abort: %s\n") % inst)
if inst.hint:
ferr.write(_("(%s)\n") % inst.hint)
return -1
except error.ParseError, inst:
if len(inst.args) > 1:
ferr.write(_("hg: parse error at %s: %s\n") %
(inst.args[1], inst.args[0]))
else:
ferr.write(_("hg: parse error: %s\n") % inst.args[0])
return -1
msg = ' '.join(' ' in a and repr(a) or a for a in req.args)
starttime = time.time()
ret = None
try:
ret = _runcatch(req)
return ret
finally:
duration = time.time() - starttime
req.ui.log("commandfinish", "%s exited %s after %0.2f seconds\n",
msg, ret or 0, duration)
def _runcatch(req):
def catchterm(*args):
raise error.SignalInterrupt
ui = req.ui
try:
for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
num = getattr(signal, name, None)
if num:
signal.signal(num, catchterm)
except ValueError:
pass # happens if called in a thread
try:
try:
# enter the debugger before command execution
if '--debugger' in req.args:
ui.warn(_("entering debugger - "
"type c to continue starting hg or h for help\n"))
pdb.set_trace()
try:
return _dispatch(req)
finally:
ui.flush()
except: # re-raises
# enter the debugger when we hit an exception
if '--debugger' in req.args:
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[2])
ui.traceback()
raise
# Global exception handling, alphabetically
# Mercurial-specific first, followed by built-in and library exceptions
except error.AmbiguousCommand, inst:
ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
(inst.args[0], " ".join(inst.args[1])))
except error.ParseError, inst:
if len(inst.args) > 1:
ui.warn(_("hg: parse error at %s: %s\n") %
(inst.args[1], inst.args[0]))
else:
ui.warn(_("hg: parse error: %s\n") % inst.args[0])
return -1
except error.LockHeld, inst:
if inst.errno == errno.ETIMEDOUT:
reason = _('timed out waiting for lock held by %s') % inst.locker
else:
reason = _('lock held by %s') % inst.locker
ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
except error.LockUnavailable, inst:
ui.warn(_("abort: could not lock %s: %s\n") %
(inst.desc or inst.filename, inst.strerror))
except error.CommandError, inst:
if inst.args[0]:
ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
commands.help_(ui, inst.args[0], full=False, command=True)
else:
ui.warn(_("hg: %s\n") % inst.args[1])
commands.help_(ui, 'shortlist')
except error.OutOfBandError, inst:
ui.warn(_("abort: remote error:\n"))
ui.warn(''.join(inst.args))
except error.RepoError, inst:
ui.warn(_("abort: %s!\n") % inst)
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
except error.ResponseError, inst:
ui.warn(_("abort: %s") % inst.args[0])
if not isinstance(inst.args[1], basestring):
ui.warn(" %r\n" % (inst.args[1],))
elif not inst.args[1]:
ui.warn(_(" empty string\n"))
else:
ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
except error.RevlogError, inst:
ui.warn(_("abort: %s!\n") % inst)
except error.SignalInterrupt:
ui.warn(_("killed!\n"))
except error.UnknownCommand, inst:
ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
try:
# check if the command is in a disabled extension
# (but don't check for extensions themselves)
commands.help_(ui, inst.args[0], unknowncmd=True)
except error.UnknownCommand:
commands.help_(ui, 'shortlist')
except error.InterventionRequired, inst:
ui.warn("%s\n" % inst)
return 1
except util.Abort, inst:
ui.warn(_("abort: %s\n") % inst)
if inst.hint:
ui.warn(_("(%s)\n") % inst.hint)
except ImportError, inst:
ui.warn(_("abort: %s!\n") % inst)
m = str(inst).split()[-1]
if m in "mpatch bdiff".split():
ui.warn(_("(did you forget to compile extensions?)\n"))
elif m in "zlib".split():
ui.warn(_("(is your Python install correct?)\n"))
except IOError, inst:
if util.safehasattr(inst, "code"):
ui.warn(_("abort: %s\n") % inst)
elif util.safehasattr(inst, "reason"):
try: # usually it is in the form (errno, strerror)
reason = inst.reason.args[1]
except (AttributeError, IndexError):
# it might be anything, for example a string
reason = inst.reason
ui.warn(_("abort: error: %s\n") % reason)
elif util.safehasattr(inst, "args") and inst.args[0] == errno.EPIPE:
if ui.debugflag:
ui.warn(_("broken pipe\n"))
elif getattr(inst, "strerror", None):
if getattr(inst, "filename", None):
ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
else:
ui.warn(_("abort: %s\n") % inst.strerror)
else:
raise
except OSError, inst:
if getattr(inst, "filename", None) is not None:
ui.warn(_("abort: %s: '%s'\n") % (inst.strerror, inst.filename))
else:
ui.warn(_("abort: %s\n") % inst.strerror)
except KeyboardInterrupt:
try:
ui.warn(_("interrupted!\n"))
except IOError, inst:
if inst.errno == errno.EPIPE:
if ui.debugflag:
ui.warn(_("\nbroken pipe\n"))
else:
raise
except MemoryError:
ui.warn(_("abort: out of memory\n"))
except SystemExit, inst:
# Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
return inst.code
except socket.error, inst:
ui.warn(_("abort: %s\n") % inst.args[-1])
except: # re-raises
myver = util.version()
# For compatibility checking, we discard the portion of the hg
# version after the + on the assumption that if a "normal
# user" is running a build with a + in it the packager
# probably built from fairly close to a tag and anyone with a
# 'make local' copy of hg (where the version number can be out
# of date) will be clueful enough to notice the implausible
# version number and try updating.
compare = myver.split('+')[0]
ct = tuplever(compare)
worst = None, ct, ''
for name, mod in extensions.extensions():
testedwith = getattr(mod, 'testedwith', '')
report = getattr(mod, 'buglink', _('the extension author.'))
if not testedwith.strip():
# We found an untested extension. It's likely the culprit.
worst = name, 'unknown', report
break
if compare not in testedwith.split() and testedwith != 'internal':
tested = [tuplever(v) for v in testedwith.split()]
lower = [t for t in tested if t < ct]
nearest = max(lower or tested)
if worst[0] is None or nearest < worst[1]:
worst = name, nearest, report
if worst[0] is not None:
name, testedwith, report = worst
if not isinstance(testedwith, str):
testedwith = '.'.join([str(c) for c in testedwith])
warning = (_('** Unknown exception encountered with '
'possibly-broken third-party extension %s\n'
'** which supports versions %s of Mercurial.\n'
'** Please disable %s and try your action again.\n'
'** If that fixes the bug please report it to %s\n')
% (name, testedwith, name, report))
else:
warning = (_("** unknown exception encountered, "
"please report by visiting\n") +
_("** http://mercurial.selenic.com/wiki/BugTracker\n"))
warning += ((_("** Python %s\n") % sys.version.replace('\n', '')) +
(_("** Mercurial Distributed SCM (version %s)\n") % myver) +
(_("** Extensions loaded: %s\n") %
", ".join([x[0] for x in extensions.extensions()])))
ui.log("commandexception", "%s\n%s\n", warning, traceback.format_exc())
ui.warn(warning)
raise
return -1
def tuplever(v):
try:
return tuple([int(i) for i in v.split('.')])
except ValueError:
return tuple()
def aliasargs(fn, givenargs):
args = getattr(fn, 'args', [])
if args:
cmd = ' '.join(map(util.shellquote, args))
nums = []
def replacer(m):
num = int(m.group(1)) - 1
nums.append(num)
if num < len(givenargs):
return givenargs[num]
raise util.Abort(_('too few arguments for command alias'))
cmd = re.sub(r'\$(\d+|\$)', replacer, cmd)
givenargs = [x for i, x in enumerate(givenargs)
if i not in nums]
args = shlex.split(cmd)
return args + givenargs
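# Illustrative sketch of the substitution above (hypothetical values): given
# fn.args == ['log', '--limit', '$1'] and givenargs == ['5', '-v'], the
# result is ['log', '--limit', '5', '-v']: '$1' is replaced by the first
# given argument, which is then dropped from the trailing argument list.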
class cmdalias(object):
def __init__(self, name, definition, cmdtable):
self.name = self.cmd = name
self.cmdname = ''
self.definition = definition
self.args = []
self.opts = []
self.help = ''
self.norepo = True
self.optionalrepo = False
self.badalias = False
try:
aliases, entry = cmdutil.findcmd(self.name, cmdtable)
for alias, e in cmdtable.iteritems():
if e is entry:
self.cmd = alias
break
self.shadows = True
except error.UnknownCommand:
self.shadows = False
if not self.definition:
def fn(ui, *args):
ui.warn(_("no definition for alias '%s'\n") % self.name)
return 1
self.fn = fn
self.badalias = True
return
if self.definition.startswith('!'):
self.shell = True
def fn(ui, *args):
env = {'HG_ARGS': ' '.join((self.name,) + args)}
def _checkvar(m):
if m.groups()[0] == '$':
return m.group()
elif int(m.groups()[0]) <= len(args):
return m.group()
else:
ui.debug("No argument found for substitution "
"of %i variable in alias '%s' definition."
% (int(m.groups()[0]), self.name))
return ''
cmd = re.sub(r'\$(\d+|\$)', _checkvar, self.definition[1:])
replace = dict((str(i + 1), arg) for i, arg in enumerate(args))
replace['0'] = self.name
replace['@'] = ' '.join(args)
cmd = util.interpolate(r'\$', replace, cmd, escape_prefix=True)
return util.system(cmd, environ=env, out=ui.fout)
self.fn = fn
return
args = shlex.split(self.definition)
self.cmdname = cmd = args.pop(0)
args = map(util.expandpath, args)
for invalidarg in ("--cwd", "-R", "--repository", "--repo", "--config"):
if _earlygetopt([invalidarg], args):
def fn(ui, *args):
ui.warn(_("error in definition for alias '%s': %s may only "
"be given on the command line\n")
% (self.name, invalidarg))
return 1
self.fn = fn
self.badalias = True
return
try:
tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
if len(tableentry) > 2:
self.fn, self.opts, self.help = tableentry
else:
self.fn, self.opts = tableentry
self.args = aliasargs(self.fn, args)
if cmd not in commands.norepo.split(' '):
self.norepo = False
if cmd in commands.optionalrepo.split(' '):
self.optionalrepo = True
if self.help.startswith("hg " + cmd):
# drop prefix in old-style help lines so hg shows the alias
self.help = self.help[4 + len(cmd):]
self.__doc__ = self.fn.__doc__
except error.UnknownCommand:
def fn(ui, *args):
ui.warn(_("alias '%s' resolves to unknown command '%s'\n") \
% (self.name, cmd))
try:
# check if the command is in a disabled extension
commands.help_(ui, cmd, unknowncmd=True)
except error.UnknownCommand:
pass
return 1
self.fn = fn
self.badalias = True
except error.AmbiguousCommand:
def fn(ui, *args):
ui.warn(_("alias '%s' resolves to ambiguous command '%s'\n") \
% (self.name, cmd))
return 1
self.fn = fn
self.badalias = True
def __call__(self, ui, *args, **opts):
if self.shadows:
ui.debug("alias '%s' shadows command '%s'\n" %
(self.name, self.cmdname))
if util.safehasattr(self, 'shell'):
return self.fn(ui, *args, **opts)
else:
try:
util.checksignature(self.fn)(ui, *args, **opts)
except error.SignatureError:
args = ' '.join([self.cmdname] + self.args)
ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
raise
def addaliases(ui, cmdtable):
# aliases are processed after extensions have been loaded, so they
# may use extension commands. Aliases can also use other alias definitions,
# but only if they have been defined prior to the current definition.
for alias, definition in ui.configitems('alias'):
aliasdef = cmdalias(alias, definition, cmdtable)
try:
olddef = cmdtable[aliasdef.cmd][0]
if olddef.definition == aliasdef.definition:
continue
except (KeyError, AttributeError):
# definition might not exist or it might not be a cmdalias
pass
cmdtable[aliasdef.name] = (aliasdef, aliasdef.opts, aliasdef.help)
if aliasdef.norepo:
commands.norepo += ' %s' % alias
if aliasdef.optionalrepo:
commands.optionalrepo += ' %s' % alias
def _parse(ui, args):
options = {}
cmdoptions = {}
try:
args = fancyopts.fancyopts(args, commands.globalopts, options)
except fancyopts.getopt.GetoptError, inst:
raise error.CommandError(None, inst)
if args:
cmd, args = args[0], args[1:]
aliases, entry = cmdutil.findcmd(cmd, commands.table,
ui.configbool("ui", "strict"))
cmd = aliases[0]
args = aliasargs(entry[0], args)
defaults = ui.config("defaults", cmd)
if defaults:
args = map(util.expandpath, shlex.split(defaults)) + args
c = list(entry[1])
else:
cmd = None
c = []
# combine global options into local
for o in commands.globalopts:
c.append((o[0], o[1], options[o[1]], o[3]))
try:
args = fancyopts.fancyopts(args, c, cmdoptions, True)
except fancyopts.getopt.GetoptError, inst:
raise error.CommandError(cmd, inst)
# separate global options back out
for o in commands.globalopts:
n = o[1]
options[n] = cmdoptions[n]
del cmdoptions[n]
return (cmd, cmd and entry[0] or None, args, options, cmdoptions)
def _parseconfig(ui, config):
"""parse the --config options from the command line"""
configs = []
for cfg in config:
try:
name, value = cfg.split('=', 1)
section, name = name.split('.', 1)
if not section or not name:
raise IndexError
ui.setconfig(section, name, value)
configs.append((section, name, value))
except (IndexError, ValueError):
raise util.Abort(_('malformed --config option: %r '
'(use --config section.name=value)') % cfg)
return configs
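# Illustrative (hypothetical values): a command-line option such as
# --config ui.username=alice is parsed into the tuple
# ('ui', 'username', 'alice') and applied immediately via ui.setconfig().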
def _earlygetopt(aliases, args):
"""Return list of values for an option (or aliases).
The values are listed in the order they appear in args.
The options and values are removed from args.
>>> args = ['x', '--cwd', 'foo', 'y']
>>> _earlygetopt(['--cwd'], args), args
(['foo'], ['x', 'y'])
>>> args = ['x', '--cwd=bar', 'y']
>>> _earlygetopt(['--cwd'], args), args
(['bar'], ['x', 'y'])
>>> args = ['x', '-R', 'foo', 'y']
>>> _earlygetopt(['-R'], args), args
(['foo'], ['x', 'y'])
>>> args = ['x', '-Rbar', 'y']
>>> _earlygetopt(['-R'], args), args
(['bar'], ['x', 'y'])
"""
try:
argcount = args.index("--")
except ValueError:
argcount = len(args)
shortopts = [opt for opt in aliases if len(opt) == 2]
values = []
pos = 0
while pos < argcount:
fullarg = arg = args[pos]
equals = arg.find('=')
if equals > -1:
arg = arg[:equals]
if arg in aliases:
del args[pos]
if equals > -1:
values.append(fullarg[equals + 1:])
argcount -= 1
else:
if pos + 1 >= argcount:
# ignore and let getopt report an error if there is no value
break
values.append(args.pop(pos))
argcount -= 2
elif arg[:2] in shortopts:
# short option can have no following space, e.g. hg log -Rfoo
values.append(args.pop(pos)[2:])
argcount -= 1
else:
pos += 1
return values
def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
# run pre-hook, and abort if it fails
hook.hook(lui, repo, "pre-%s" % cmd, True, args=" ".join(fullargs),
pats=cmdpats, opts=cmdoptions)
ret = _runcommand(ui, options, cmd, d)
# run post-hook, passing command result
hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
result=ret, pats=cmdpats, opts=cmdoptions)
return ret
def _getlocal(ui, rpath):
"""Return (path, local ui object) for the given target path.
    Takes paths in [cwd]/.hg/hgrc into account.
"""
try:
wd = os.getcwd()
except OSError, e:
raise util.Abort(_("error getting current working directory: %s") %
e.strerror)
path = cmdutil.findrepo(wd) or ""
if not path:
lui = ui
else:
lui = ui.copy()
lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
if rpath and rpath[-1]:
path = lui.expandpath(rpath[-1])
lui = ui.copy()
lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)
return path, lui
def _checkshellalias(lui, ui, args):
options = {}
try:
args = fancyopts.fancyopts(args, commands.globalopts, options)
except fancyopts.getopt.GetoptError:
return
if not args:
return
norepo = commands.norepo
optionalrepo = commands.optionalrepo
def restorecommands():
commands.norepo = norepo
commands.optionalrepo = optionalrepo
cmdtable = commands.table.copy()
addaliases(lui, cmdtable)
cmd = args[0]
try:
aliases, entry = cmdutil.findcmd(cmd, cmdtable,
lui.configbool("ui", "strict"))
except (error.AmbiguousCommand, error.UnknownCommand):
restorecommands()
return
cmd = aliases[0]
fn = entry[0]
if cmd and util.safehasattr(fn, 'shell'):
d = lambda: fn(ui, *args[1:])
return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
[], {})
restorecommands()
_loaded = set()
def _dispatch(req):
args = req.args
ui = req.ui
# read --config before doing anything else
# (e.g. to change trust settings for reading .hg/hgrc)
cfgs = _parseconfig(ui, _earlygetopt(['--config'], args))
# check for cwd
cwd = _earlygetopt(['--cwd'], args)
if cwd:
os.chdir(cwd[-1])
rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
path, lui = _getlocal(ui, rpath)
# Now that we're operating in the right directory/repository with
# the right config settings, check for shell aliases
shellaliasfn = _checkshellalias(lui, ui, args)
if shellaliasfn:
return shellaliasfn()
# Configure extensions in phases: uisetup, extsetup, cmdtable, and
# reposetup. Programs like TortoiseHg will call _dispatch several
# times so we keep track of configured extensions in _loaded.
extensions.loadall(lui)
exts = [ext for ext in extensions.extensions() if ext[0] not in _loaded]
# Propagate any changes to lui.__class__ by extensions
ui.__class__ = lui.__class__
# (uisetup and extsetup are handled in extensions.loadall)
for name, module in exts:
cmdtable = getattr(module, 'cmdtable', {})
overrides = [cmd for cmd in cmdtable if cmd in commands.table]
if overrides:
ui.warn(_("extension '%s' overrides commands: %s\n")
% (name, " ".join(overrides)))
commands.table.update(cmdtable)
_loaded.add(name)
# (reposetup is handled in hg.repository)
addaliases(lui, commands.table)
# check for fallback encoding
fallback = lui.config('ui', 'fallbackencoding')
if fallback:
encoding.fallbackencoding = fallback
fullargs = args
cmd, func, args, options, cmdoptions = _parse(lui, args)
if options["config"]:
raise util.Abort(_("option --config may not be abbreviated!"))
if options["cwd"]:
raise util.Abort(_("option --cwd may not be abbreviated!"))
if options["repository"]:
raise util.Abort(_(
"option -R has to be separated from other options (e.g. not -qR) "
"and --repository may only be abbreviated as --repo!"))
if options["encoding"]:
encoding.encoding = options["encoding"]
if options["encodingmode"]:
encoding.encodingmode = options["encodingmode"]
if options["time"]:
def get_times():
t = os.times()
if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
t = (t[0], t[1], t[2], t[3], time.clock())
return t
s = get_times()
def print_time():
t = get_times()
ui.warn(_("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
(t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
atexit.register(print_time)
uis = set([ui, lui])
if req.repo:
uis.add(req.repo.ui)
# copy configs that were passed on the cmdline (--config) to the repo ui
for cfg in cfgs:
req.repo.ui.setconfig(*cfg)
if options['verbose'] or options['debug'] or options['quiet']:
for opt in ('verbose', 'debug', 'quiet'):
val = str(bool(options[opt]))
for ui_ in uis:
ui_.setconfig('ui', opt, val)
if options['traceback']:
for ui_ in uis:
ui_.setconfig('ui', 'traceback', 'on')
if options['noninteractive']:
for ui_ in uis:
ui_.setconfig('ui', 'interactive', 'off')
if cmdoptions.get('insecure', False):
for ui_ in uis:
ui_.setconfig('web', 'cacerts', '')
if options['version']:
return commands.version_(ui)
if options['help']:
return commands.help_(ui, cmd)
elif not cmd:
return commands.help_(ui, 'shortlist')
repo = None
cmdpats = args[:]
if cmd not in commands.norepo.split():
# use the repo from the request only if we don't have -R
if not rpath and not cwd:
repo = req.repo
if repo:
# set the descriptors of the repo ui to those of ui
repo.ui.fin = ui.fin
repo.ui.fout = ui.fout
repo.ui.ferr = ui.ferr
else:
try:
repo = hg.repository(ui, path=path)
if not repo.local():
raise util.Abort(_("repository '%s' is not local") % path)
if options['hidden']:
repo = repo.unfiltered()
repo.ui.setconfig("bundle", "mainreporoot", repo.root)
except error.RequirementError:
raise
except error.RepoError:
if cmd not in commands.optionalrepo.split():
if (cmd in commands.inferrepo.split() and
args and not path): # try to infer -R from command args
repos = map(cmdutil.findrepo, args)
guess = repos[0]
if guess and repos.count(guess) == len(repos):
req.args = ['--repository', guess] + fullargs
return _dispatch(req)
if not path:
raise error.RepoError(_("no repository found in '%s'"
" (.hg not found)")
% os.getcwd())
raise
if repo:
ui = repo.ui
args.insert(0, repo)
elif rpath:
ui.warn(_("warning: --repository ignored\n"))
msg = ' '.join(repr(a) if ' ' in a else a for a in fullargs)
ui.log("command", '%s\n', msg)
d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)
try:
return runcommand(lui, repo, cmd, fullargs, ui, options, d,
cmdpats, cmdoptions)
finally:
if repo and repo != req.repo:
repo.close()
def lsprofile(ui, func, fp):
format = ui.config('profiling', 'format', default='text')
field = ui.config('profiling', 'sort', default='inlinetime')
limit = ui.configint('profiling', 'limit', default=30)
climit = ui.configint('profiling', 'nested', default=5)
if format not in ['text', 'kcachegrind']:
ui.warn(_("unrecognized profiling format '%s'"
" - Ignored\n") % format)
format = 'text'
try:
from mercurial import lsprof
except ImportError:
raise util.Abort(_(
'lsprof not available - install from '
'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
p = lsprof.Profiler()
p.enable(subcalls=True)
try:
return func()
finally:
p.disable()
if format == 'kcachegrind':
import lsprofcalltree
calltree = lsprofcalltree.KCacheGrind(p)
calltree.output(fp)
else:
# format == 'text'
stats = lsprof.Stats(p.getstats())
stats.sort(field)
stats.pprint(limit=limit, file=fp, climit=climit)
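# Usage sketch (config keys as read above; the output file name is
# arbitrary):
#     hg --profile --config profiling.format=kcachegrind \
#        --config profiling.output=hg.kgrind status
# writes a KCacheGrind-loadable call tree instead of the default text table
# sorted by profiling.sort (inlinetime) and truncated at profiling.limit.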
def statprofile(ui, func, fp):
try:
import statprof
except ImportError:
raise util.Abort(_(
'statprof not available - install using "easy_install statprof"'))
freq = ui.configint('profiling', 'freq', default=1000)
if freq > 0:
statprof.reset(freq)
else:
ui.warn(_("invalid sampling frequency '%s' - ignoring\n") % freq)
statprof.start()
try:
return func()
finally:
statprof.stop()
statprof.display(fp)
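# Usage sketch (a sampling profile at double the default frequency):
#     hg --profile --config profiling.type=stat \
#        --config profiling.freq=2000 log -l 1000
# A freq <= 0 keeps statprof's previous setting and only emits the warning
# above.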
def _runcommand(ui, options, cmd, cmdfunc):
def checkargs():
try:
return cmdfunc()
except error.SignatureError:
raise error.CommandError(cmd, _("invalid arguments"))
if options['profile']:
profiler = os.getenv('HGPROF')
if profiler is None:
profiler = ui.config('profiling', 'type', default='ls')
if profiler not in ('ls', 'stat'):
ui.warn(_("unrecognized profiler '%s' - ignored\n") % profiler)
profiler = 'ls'
output = ui.config('profiling', 'output')
if output:
path = ui.expandpath(output)
fp = open(path, 'wb')
else:
fp = sys.stderr
try:
if profiler == 'ls':
return lsprofile(ui, checkargs, fp)
else:
return statprofile(ui, checkargs, fp)
finally:
if output:
fp.close()
else:
return checkargs()
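# Backend selection order, restated as a sketch: the HGPROF environment
# variable wins over profiling.type, so
#     HGPROF=stat hg --profile log
# reaches statprofile() even when the config file says profiling.type=ls;
# unknown profiler names fall back to 'ls' with a warning.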
|
cmspsgp31/anubis | refs/heads/master | anubis/app/apps.py | 1 | # Copyright (C) 2016, Ugo Pozo 2016, Câmara Municipal de São Paulo
# apps.py - configuration for the Anubis application.
# This file is part of the Anubis software.
# Anubis is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# Anubis is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
class AnubisConfig(AppConfig):
name = 'anubis.app'
label = 'anubis'
verbose_name = 'Anubis'
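# Usage sketch (assumed settings module, not part of this repository):
# depending on the Django version, the app is enabled either by naming the
# config class explicitly,
#     INSTALLED_APPS = ['anubis.app.apps.AnubisConfig', ...]
# or, on Django >= 3.2, simply as 'anubis.app', which auto-discovers the
# single AppConfig subclass defined above.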
|
msebire/intellij-community | refs/heads/master | python/testData/psi/ExecPy3.py | 105 | def exec(): pass
|
talau/ns-3.18-wifi-queue-red | refs/heads/master | src/virtual-net-device/bindings/modulegen__gcc_LP64.py | 24 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
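# Returning True tells pybindgen the error was handled: the offending wrapper
# is skipped with the warning above and code generation continues instead of
# aborting the whole module.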
import sys
def module_init():
root_module = Module('ns.virtual_net_device', cpp_namespace='::ns3')
return root_module
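# A sketch of the usual driver for the registration functions below (assumed
# from the standard pybindgen flow used by the ns-3 modulegen scripts):
#     root_module = module_init()
#     register_types(root_module)
#     register_methods(root_module)
#     root_module.generate(FileCodeSink(sys.stdout))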
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice [class]
module.add_class('VirtualNetDevice', parent=root_module['ns3::NetDevice'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
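# Nested C++ namespaces (ns3::FatalImpl, ns3::Hash, ...) become nested Python
# submodules; each gets its own register_types_* helper so its classes can
# still resolve their parents through root_module.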
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3VirtualNetDevice_methods(root_module, root_module['ns3::VirtualNetDevice'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
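# Each register_Ns3*_methods() helper below mirrors one wrapped C++ class:
# comparison/stream operators first, then constructors, then member
# functions, with the originating header recorded in the generated '##'
# comments.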
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
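## Illustrative use (a sketch; the ns.network module path is an assumption):
##   addr = ns.network.Ipv4Address("10.1.1.1")
##   mask = ns.network.Ipv4Mask("255.255.255.0")
##   bcast = addr.GetSubnetDirectedBroadcast(mask)
## The operators registered above (<, !=, ==, output stream) map to Python's
## comparison protocol and str().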
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[])
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
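## Note: GetAttribute/SetAttribute take an ns3::AttributeValue by reference,
## so from Python an attribute is read by constructing the matching *Value
## object and letting the call fill it in, e.g. (sketch; attribute name is
## illustrative only):
##   v = ns.core.StringValue()
##   obj.GetAttribute("Name", v)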
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
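## The SimpleRefCount<...> registrations here and further below all follow the
## same template-instantiation pattern: a default constructor, a copy
## constructor, and the static Cleanup() hook, one instantiation per
## reference-counted base type (AttributeAccessor, Packet, and so on).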
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
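## TagBuffer exposes a symmetric serialize/deserialize API: each WriteU8/16/
## 32/64 and WriteDouble call above has a matching Read counterpart, and tag
## implementations are expected to read fields back in the same order they
## were written.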
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
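## Illustrative lookup through the TypeId bindings above (a sketch; the
## ns.core module path is an assumption):
##   tid = ns.core.TypeId.LookupByName("ns3::Object")  # static lookup
##   print(tid.GetName(), tid.GetAttributeN())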
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
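## Deserialize, GetSerializedSize, Print, and Serialize are registered as pure
## virtual, so Header is an abstract base on the Python side as well: only
## concrete header subclasses that override them can be instantiated.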
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
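## ---------------------------------------------------------------------------
## The six register_Ns3SimpleRefCount__*_methods() functions above all bind the
## same three members (default constructor, copy constructor, and the static
## Cleanup()) of ns3::SimpleRefCount<T, ns3::empty, ns3::DefaultDeleter<T>>.
## C++ templates have no single Python counterpart, so pybindgen emits one
## registration function per concrete instantiation. A condensed hand-written
## sketch of that recurring shape, for a hypothetical type ns3::Foo
## (illustrative only, never invoked by the generated scan):
def _example_register_simple_ref_count(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Foo, ns3::empty, ns3::DefaultDeleter< ns3::Foo > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return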
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
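## Note: Trailer is the mirror image of Header. A Trailer subclass serializes
## itself at the tail of a packet buffer, which is why Deserialize() above
## takes an iterator positioned at the end of the trailer rather than at its
## start.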
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
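## ---------------------------------------------------------------------------
## ns3's attribute system is bound as a triad: AttributeValue subclasses hold
## the data (Copy/SerializeToString/DeserializeFromString are the pure-virtual
## hooks registered above), AttributeChecker subclasses validate and describe
## the value type, and AttributeAccessor subclasses read/write the attribute on
## an ObjectBase. Every concrete *Value class registered below (Callback,
## Ipv4Address, Ipv4Mask, Ipv6Address, Ipv6Prefix, TypeId, Address) repeats the
## same shape, sketched here for a hypothetical ns3::Foo (illustrative only,
## never invoked by the generated scan):
def _example_register_value_methods(root_module, cls):
    cls.add_constructor([])
    cls.add_constructor([param('ns3::FooValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Foo const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('Get', 'ns3::Foo', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::Foo const &', 'value')])
    return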
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
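## Note: the EmptyAttributeValue overrides above are registered with
## visibility='private' because attribute.h declares them private; pybindgen
## records the C++ access level so the generated wrapper can mirror it rather
## than expose these methods publicly.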
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
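## Note: every NetDevice method above is registered with is_pure_virtual=True;
## the class is a pure interface, and concrete devices (including the
## VirtualNetDevice registered later in this file) supply the implementations.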
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
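## Note: a NixVector is a packed bit-vector of neighbor indices used by
## nix-vector routing. AddNeighborIndex()/ExtractNeighborIndex() push and pop
## fields of BitCount(n) bits, and Serialize()/Deserialize() move the whole
## vector through raw uint32_t buffers.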
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
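## Illustrative use of the Node API bound above, from Python. Assumes the built
## bindings import as ns.network; the exact package name depends on the build.
## Sketch only, defined here for documentation and never invoked:
def _example_node_usage():
    import ns.network  # assumption: ns-3 python bindings built under ns.*
    a = ns.network.Node()
    b = ns.network.Node()
    assert a.GetNDevices() == 0        # a bare Node starts with no devices
    assert b.GetId() == a.GetId() + 1  # node ids are handed out sequentially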
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
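## Illustrative use of the Packet API bound above (same ns.network import
## assumption as the Node sketch; never invoked):
def _example_packet_usage():
    import ns.network  # assumption: ns-3 python bindings built under ns.*
    p = ns.network.Packet(64)          # 64 zero-filled bytes
    p.AddPaddingAtEnd(16)              # now 80 bytes
    frag = p.CreateFragment(0, 32)     # copy-on-write view of the first 32
    assert p.GetSize() == 80
    assert frag.GetSize() == 32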
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3VirtualNetDevice_methods(root_module, cls):
## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice::VirtualNetDevice(ns3::VirtualNetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::VirtualNetDevice const &', 'arg0')])
## virtual-net-device.h (module 'virtual-net-device'): ns3::VirtualNetDevice::VirtualNetDevice() [constructor]
cls.add_constructor([])
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Ptr<ns3::Channel> ns3::VirtualNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): uint32_t ns3::VirtualNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): uint16_t ns3::VirtualNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Address ns3::VirtualNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): ns3::Ptr<ns3::Node> ns3::VirtualNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): static ns3::TypeId ns3::VirtualNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::Receive(ns3::Ptr<ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('Receive',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')])
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetIsPointToPoint(bool isPointToPoint) [member function]
cls.add_method('SetIsPointToPoint',
'void',
[param('bool', 'isPointToPoint')])
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetNeedsArp(bool needsArp) [member function]
cls.add_method('SetNeedsArp',
'void',
[param('bool', 'needsArp')])
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetSendCallback(ns3::Callback<bool, ns3::Ptr<ns3::Packet>, ns3::Address const&, ns3::Address const&, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> transmitCb) [member function]
cls.add_method('SetSendCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::Packet >, ns3::Address const &, ns3::Address const &, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'transmitCb')])
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::SetSupportsSendFrom(bool supportsSendFrom) [member function]
cls.add_method('SetSupportsSendFrom',
'void',
[param('bool', 'supportsSendFrom')])
## virtual-net-device.h (module 'virtual-net-device'): bool ns3::VirtualNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## virtual-net-device.h (module 'virtual-net-device'): void ns3::VirtualNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
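## Note: VirtualNetDevice implements the full NetDevice interface but owns no
## hardware. Send()/SendFrom() hand each packet to the callback installed via
## SetSendCallback(), and incoming traffic is injected with Receive(). The
## extra Set* knobs bound above (SetIsPointToPoint, SetNeedsArp,
## SetSupportsSendFrom, SetMtu) shape how the device advertises itself, e.g.
## (sketch only; the ns.virtual_net_device module name is an assumption about
## the build, and this function is never invoked):
def _example_virtual_net_device_setup():
    import ns.virtual_net_device
    tap = ns.virtual_net_device.VirtualNetDevice()
    tap.SetNeedsArp(False)        # device resolves addresses itself
    tap.SetSupportsSendFrom(True)
    tap.SetMtu(1400)              # returns bool; ignored in this sketch
    return tap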
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
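# Note (illustrative): this binding script is normally imported and driven by
# the ns-3 build system; run standalone, main() simply streams the generated
# C++ binding code to stdout via FileCodeSink, so it can be captured by hand
# (the script filename below is hypothetical):
#
#     python this_module.py > ns3module.cc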
|
kionz/librime | refs/heads/master | thirdparty/src/opencc/deps/gtest-1.7.0/test/gtest_list_tests_unittest.py | 1898 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the relevant command line flags.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running the command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
qqzwc/XX-Net | refs/heads/master | code/default/python27/1.0/lib/hmac.py | 70 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from operator import _compare_digest as compare_digest
trans_5C = "".join ([chr (x ^ 0x5C) for x in xrange(256)])
trans_36 = "".join ([chr (x ^ 0x36) for x in xrange(256)])
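# These tables XOR every possible key byte with the RFC 2104 padding
# constants opad (0x5C) and ipad (0x36); HMAC is then computed as
# H((K ^ opad) + H((K ^ ipad) + msg)).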
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
# A unique object passed by HMAC.copy() to the HMAC constructor, in order
# that the latter return very quickly. HMAC("") in contrast is quite
# expensive.
_secret_backdoor_key = []
class HMAC:
"""RFC 2104 HMAC class. Also complies with RFC 4231.
This supports the API for Cryptographic Hash Functions (PEP 247).
"""
blocksize = 64 # 512-bit HMAC; can be changed in subclasses.
def __init__(self, key, msg = None, digestmod = None):
"""Create a new HMAC object.
key: key for the keyed hash object.
msg: Initial input for the hash, if provided.
digestmod: A module supporting PEP 247. *OR*
A hashlib constructor returning a new hash object.
Defaults to hashlib.md5.
"""
if key is _secret_backdoor_key: # cheap
return
if digestmod is None:
import hashlib
digestmod = hashlib.md5
if hasattr(digestmod, '__call__'):
self.digest_cons = digestmod
else:
self.digest_cons = lambda d='': digestmod.new(d)
self.outer = self.digest_cons()
self.inner = self.digest_cons()
self.digest_size = self.inner.digest_size
if hasattr(self.inner, 'block_size'):
blocksize = self.inner.block_size
if blocksize < 16:
# Very low blocksize, most likely a legacy value like
# Lib/sha.py and Lib/md5.py have.
_warnings.warn('block_size of %d seems too small; using our '
'default of %d.' % (blocksize, self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
else:
_warnings.warn('No block_size attribute on given digest object; '
'Assuming %d.' % (self.blocksize),
RuntimeWarning, 2)
blocksize = self.blocksize
if len(key) > blocksize:
key = self.digest_cons(key).digest()
key = key + chr(0) * (blocksize - len(key))
self.outer.update(key.translate(trans_5C))
self.inner.update(key.translate(trans_36))
if msg is not None:
self.update(msg)
## def clear(self):
## raise NotImplementedError, "clear() method not available in HMAC."
def update(self, msg):
"""Update this hashing object with the string msg.
"""
self.inner.update(msg)
def copy(self):
"""Return a separate copy of this hashing object.
An update to this copy won't affect the original object.
"""
other = self.__class__(_secret_backdoor_key)
other.digest_cons = self.digest_cons
other.digest_size = self.digest_size
other.inner = self.inner.copy()
other.outer = self.outer.copy()
return other
def _current(self):
"""Return a hash object for the current state.
To be used only internally with digest() and hexdigest().
"""
h = self.outer.copy()
h.update(self.inner.digest())
return h
def digest(self):
"""Return the hash value of this hashing object.
This returns a string containing 8-bit data. The object is
not altered in any way by this function; you can continue
updating the object after calling this function.
"""
h = self._current()
return h.digest()
def hexdigest(self):
"""Like digest(), but returns a string of hexadecimal digits instead.
"""
h = self._current()
return h.hexdigest()
def new(key, msg = None, digestmod = None):
"""Create a new hashing object and return it.
key: The starting key for the hash.
msg: if available, will immediately be hashed into the object's starting
state.
You can now feed arbitrary strings into the object using its update()
method, and can ask for the hash value at any time by calling its digest()
method.
"""
return HMAC(key, msg, digestmod)
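# A minimal usage sketch (standard library only; the key and message values
# are made up for illustration):
#
#     import hmac, hashlib
#     mac = hmac.new('secret-key', 'message', hashlib.sha256)
#     print mac.hexdigest()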
|
cselis86/edx-platform | refs/heads/installer | common/lib/capa/capa/checker.py | 123 | #!/usr/bin/env python
"""
Command-line tool for testing and displaying capa problem files
"""
from __future__ import unicode_literals
import argparse
import logging
import sys
from path import path
from cStringIO import StringIO
from calc import UndefinedVariable
from capa.capa_problem import LoncapaProblem
from mako.lookup import TemplateLookup
logging.basicConfig(format="%(levelname)s %(message)s")
log = logging.getLogger('capa.checker')
class DemoSystem(object):
def __init__(self):
self.lookup = TemplateLookup(directories=[path(__file__).dirname() / 'templates'])
self.DEBUG = True
def render_template(self, template_filename, dictionary, context=None):
if context is None:
context = {}
context_dict = {}
context_dict.update(dictionary)
context_dict.update(context)
return self.lookup.get_template(template_filename).render(**context_dict)
def main():
parser = argparse.ArgumentParser(description='Check Problem Files')
parser.add_argument("command", choices=['test', 'show']) # Watch? Render? Open?
parser.add_argument("files", nargs="+", type=argparse.FileType('r'))
parser.add_argument("--seed", required=False, type=int)
parser.add_argument("--log-level", required=False, default="INFO",
choices=['info', 'debug', 'warn', 'error',
'INFO', 'DEBUG', 'WARN', 'ERROR'])
args = parser.parse_args()
log.setLevel(args.log_level.upper())
system = DemoSystem()
for problem_file in args.files:
log.info("Opening {0}".format(problem_file.name))
try:
problem = LoncapaProblem(problem_file, "fakeid", seed=args.seed, system=system)
except Exception as ex:
log.error("Could not parse file {0}".format(problem_file.name))
log.exception(ex)
continue
if args.command == 'test':
command_test(problem)
elif args.command == 'show':
command_show(problem)
problem_file.close()
# In case we want to do anything else here.
def command_show(problem):
"""Display the text for this problem"""
print problem.get_html()
def command_test(problem):
# We're going to trap stdout/stderr from the problems (yes, some print)
old_stdout, old_stderr = sys.stdout, sys.stderr
try:
sys.stdout = StringIO()
sys.stderr = StringIO()
check_that_suggested_answers_work(problem)
check_that_blanks_fail(problem)
log_captured_output(sys.stdout,
"captured stdout from {0}".format(problem))
log_captured_output(sys.stderr,
"captured stderr from {0}".format(problem))
except Exception as e:
log.exception(e)
finally:
sys.stdout, sys.stderr = old_stdout, old_stderr
def check_that_blanks_fail(problem):
"""Leaving it blank should never work. Neither should a space."""
blank_answers = dict((answer_id, u"")
for answer_id in problem.get_question_answers())
grading_results = problem.grade_answers(blank_answers)
try:
assert(all(result == 'incorrect' for result in grading_results.values()))
except AssertionError:
log.error("Blank accepted as correct answer in {0} for {1}"
.format(problem,
[answer_id for answer_id, result
in sorted(grading_results.items())
if result != 'incorrect']))
def check_that_suggested_answers_work(problem):
"""Split this up so that we're only used for formula/numeric answers.
Examples of where this fails:
* Displayed answers use units but acceptable ones do not.
- L1e0.xml
- Presents itself as UndefinedVariable (when it tries to pass to calc)
* "a or d" is what's displayed, but only "a" or "d" is accepted, not the
string "a or d".
- L1-e00.xml
"""
# These are actual answers we get from the responsetypes
real_answers = problem.get_question_answers()
# all_answers is real_answers + blanks for other answer_ids for which the
# responsetypes can't provide us pre-canned answers (customresponse)
all_answer_ids = problem.get_answer_ids()
all_answers = dict((answer_id, real_answers.get(answer_id, ""))
for answer_id in all_answer_ids)
log.debug("Real answers: {0}".format(real_answers))
if real_answers:
try:
real_results = dict((answer_id, result) for answer_id, result
in problem.grade_answers(all_answers).items()
if answer_id in real_answers)
log.debug(real_results)
assert(all(result == 'correct'
for answer_id, result in real_results.items()))
except UndefinedVariable as uv_exc:
log.error("The variable \"{0}\" specified in the ".format(uv_exc) +
"solution isn't recognized (is it a units measure?).")
except AssertionError:
log.error("The following generated answers were not accepted for {0}:"
.format(problem))
for question_id, result in sorted(real_results.items()):
if result != 'correct':
log.error(" {0} = {1}".format(question_id, real_answers[question_id]))
except Exception as ex:
log.error("Uncaught error in {0}".format(problem))
log.exception(ex)
def log_captured_output(output_stream, stream_name):
output_stream.seek(0)
output_text = output_stream.read()
if output_text:
log.info("##### Begin {0} #####\n".format(stream_name) + output_text)
log.info("##### End {0} #####".format(stream_name))
if __name__ == '__main__':
sys.exit(main())
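# Example invocations (the problem file paths are illustrative):
#
#     python checker.py show problems/example.xml
#     python checker.py test problems/*.xml --seed 42 --log-level debug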
|
skylander86/ycml | refs/heads/master | setup.py | 1 | import os
from setuptools import setup, find_packages
import sys
if sys.version_info.major < 3:
raise Exception('This is a Python 3 only package. Please upgrade.')
#end if
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(SCRIPT_DIR, 'README.rst'), 'r') as readme_file:
readme = readme_file.read()
with open(os.path.join(SCRIPT_DIR, 'requirements.txt'), 'r') as f:
requirements = list(filter(None, (line.strip() for line in f if not line.startswith('#'))))
with open(os.path.join(SCRIPT_DIR, 'VERSION'), 'r') as f:
version = f.read().strip()
setup(
name='ycml',
version=version,
author='yc sim',
author_email='hello@yanchuan.sg',
description='yc\'s collection of convenience code for developing ML applications.',
long_description=readme,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3 :: Only',
'Natural Language :: English',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Operating System :: POSIX',
],
keywords=['machine learning', 'ml', 'natural language processing', 'nlp', 'utilities'],
url='http://github.com/skylander86/ycml',
license='Apache Software License 2.0',
packages=find_packages('.'),
install_requires=requirements,
include_package_data=True,
zip_safe=False,
)
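# Note: setup() reads README.rst, requirements.txt, and VERSION relative to
# SCRIPT_DIR, so builds (e.g. `pip install .` or `python setup.py sdist`)
# must be run from a checkout containing those three files alongside setup.py.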
|