repo_name
stringlengths 5
88
| path
stringlengths 4
199
| copies
stringlengths 1
5
| size
stringlengths 4
6
| content
stringlengths 855
832k
| license
stringclasses 15
values | hash
int64 -9,223,128,179,723,874,000
9,223,237,214B
| line_mean
float64 3.5
99
| line_max
int64 15
999
| alpha_frac
float64 0.25
0.87
| autogenerated
bool 1
class | ratio
float64 1.5
7.55
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class | score
float64 0
0.2
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
lakshayg/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/moving_stats_test.py | 46 | 5107 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for computing moving-average statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import moving_stats
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
rng = np.random.RandomState(0)
class MovingReduceMeanVarianceTest(test.TestCase):
  """Covers assign_moving_mean_variance() and moving_mean_variance()."""

  def test_assign_moving_mean_variance(self):
    shape = [1, 2]
    true_mean = np.array([[0., 3.]])
    true_stddev = np.array([[1.1, 0.5]])
    with self.test_session() as sess:
      # Variables start from a deliberately wrong guess for the moments.
      mean_var = variables.Variable(array_ops.zeros_like(true_mean))
      variance_var = variables.Variable(array_ops.ones_like(true_stddev))
      x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
      x = true_stddev * x + true_mean
      ema, emv = moving_stats.assign_moving_mean_variance(
          mean_var, variance_var, x, decay=0.99)
      self.assertEqual(ema.dtype.base_dtype, dtypes.float64)
      self.assertEqual(emv.dtype.base_dtype, dtypes.float64)
      # Run 2000 updates; the moving statistics should end near the truth.
      variables.global_variables_initializer().run()
      for _ in range(2000):
        sess.run([ema, emv])
      mean_var_, variance_var_, ema_, emv_ = sess.run(
          [mean_var, variance_var, ema, emv])
      # The returned tensors must pass the variables straight through.
      self.assertAllEqual(mean_var_, ema_)
      self.assertAllEqual(variance_var_, emv_)
      # And the values must approximate the true moments.
      self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
      self.assertAllClose(true_stddev**2., emv_, rtol=0.06, atol=0.)
      # Perturb the variables, then update some more; the averages must
      # re-converge to the same true moments.
      sess.run([
          mean_var.assign(np.array([[-1., 2.]])),
          variance_var.assign(np.array([[2., 1.]])),
      ])
      for _ in range(2000):
        sess.run([ema, emv])
      mean_var_, variance_var_, ema_, emv_ = sess.run(
          [mean_var, variance_var, ema, emv])
      self.assertAllEqual(mean_var_, ema_)
      self.assertAllEqual(variance_var_, emv_)
      self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
      self.assertAllClose(true_stddev**2., emv_, rtol=0.1, atol=0.)

  def test_moving_mean_variance(self):
    shape = [1, 2]
    true_mean = np.array([[0., 3.]])
    true_stddev = np.array([[1.1, 0.5]])
    with self.test_session() as sess:
      # Draw x from a normal distribution with the target moments.
      x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
      x = true_stddev * x + true_mean
      ema, emv = moving_stats.moving_mean_variance(
          x, decay=0.99)
      self.assertEqual(ema.dtype.base_dtype, dtypes.float64)
      self.assertEqual(emv.dtype.base_dtype, dtypes.float64)
      # Run 2000 updates; moving averages should be near the true values.
      variables.global_variables_initializer().run()
      for _ in range(2000):
        sess.run([ema, emv])
      ema_, emv_ = sess.run([ema, emv])
      self.assertAllClose(true_mean, ema_, rtol=0.005, atol=0.015)
      self.assertAllClose(true_stddev**2., emv_, rtol=0.06, atol=0.)
class MovingLogExponentialMovingMeanExpTest(test.TestCase):
  """Covers assign_log_moving_mean_exp()."""

  def test_assign_log_moving_mean_exp(self):
    shape = [1, 2]
    true_mean = np.array([[0., 3.]])
    true_stddev = np.array([[1.1, 0.5]])
    decay = 0.99
    with self.test_session() as sess:
      # Draw x from a normal distribution with the target moments.
      x = random_ops.random_normal(shape, dtype=np.float64, seed=0)
      x = true_stddev * x + true_mean
      log_mean_exp_var = variables.Variable(array_ops.zeros_like(true_mean))
      variables.global_variables_initializer().run()
      log_mean_exp = moving_stats.assign_log_moving_mean_exp(
          log_mean_exp_var, x, decay=decay)
      expected_ = np.zeros_like(true_mean)
      # Track the reference recurrence in numpy and compare every step.
      for _ in range(2000):
        x_, log_mean_exp_ = sess.run([x, log_mean_exp])
        expected_ = np.log(decay * np.exp(expected_) + (1 - decay) * np.exp(x_))
        self.assertAllClose(expected_, log_mean_exp_, rtol=1e-6, atol=1e-9)
# Run the test suite when invoked directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 | 1,057,861,798,496,475,500 | 38.589147 | 80 | 0.647543 | false | 3.42063 | true | false | false | 0.01077 |
ledocc/hunter | maintenance/upload-cache-to-github.py | 1 | 15346 | #!/usr/bin/env python3
import argparse
import base64
import hashlib
import json
import os
import requests
import sys
import time
def sleep_time(attempt):
    """Return how many seconds to wait before retry number *attempt*.

    The schedule backs off progressively: 0, 15, 60, 90 and 300 seconds
    for attempts 1-5, then a flat 1200 seconds for any later attempt.

    Raises:
        ValueError: if *attempt* is not a positive number (ValueError is
            an Exception subclass, so existing broad handlers still work).
    """
    if attempt <= 0:
        raise ValueError('attempt must be a positive number')
    # Lookup table replaces the original if/elif chain.
    schedule = (0, 15, 60, 90, 300)
    if attempt <= len(schedule):
        return schedule[attempt - 1]
    return 1200
def retry(func_in):
    """Decorator: call *func_in* until it succeeds, up to 10 retries.

    Between failed attempts it sleeps according to sleep_time(); once the
    retry budget is exhausted the last exception is re-raised.
    """
    def func_out(*args, **kwargs):
        retry_max = 10
        attempt = 0
        while True:
            attempt += 1
            try:
                return func_in(*args, **kwargs)
            except Exception as exc:
                if attempt > retry_max:
                    raise exc
                print('Operation failed. Exception:\n {}'.format(exc))
                sec = sleep_time(attempt)
                print('Retry #{} (of {}) after {} seconds'.format(
                    attempt, retry_max, sec))
                time.sleep(sec)
        raise Exception('Unreachable')
    return func_out
# Streaming download recipe: http://stackoverflow.com/a/16696317/2288008
@retry
def download_file(url, local_file, auth, chunk_size=1024):
    """Stream *url* into *local_file*, *chunk_size* bytes at a time."""
    print('Downloading:\n {}\n -> {}'.format(url, local_file))
    response = requests.get(url, stream=True, auth=auth)
    if not response.ok:
        raise Exception('Downloading failed')
    with open(local_file, 'wb') as out:
        for chunk in response.iter_content(chunk_size=chunk_size):
            # Skip keep-alive chunks, which arrive empty.
            if chunk:
                out.write(chunk)
class Github:
    """Minimal GitHub API client used to upload Hunter cache files.

    Network calls are wrapped with the ``retry`` decorator and use HTTP
    basic authentication.

    Fixes against the original version:
    - every ``open()`` is now a context manager (file handles leaked in
      ``upload_bzip_once``, ``upload_raw_file`` and ``create_new_file``);
    - the local variable that shadowed the ``json`` module in
      ``find_asset_id_by_name`` was renamed.
    """

    def __init__(self, username, password, repo_owner, repo):
        self.repo_owner = repo_owner
        self.repo = repo
        self.auth = requests.auth.HTTPBasicAuth(username, password)
        # Fail fast on bad credentials or an exhausted rate limit.
        self.simple_request()

    @retry
    def simple_request(self):
        """Ping the API root to validate credentials and the rate limit."""
        print('Processing simple request')
        r = requests.get('https://api.github.com', auth=self.auth)
        if not r.ok:
            sys.exit('Simple request fails. Check your password.')
        limit = int(r.headers['X-RateLimit-Remaining'])
        print('GitHub Limit: {}'.format(limit))
        if limit == 0:
            raise Exception('GitHub limit is 0')
        print('Simple request pass')

    @retry
    def get_release_by_tag(self, tagname):
        """Return the release id for tag *tagname*."""
        print('Get release-id by tag `{}`'.format(tagname))
        # https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name
        # GET /repos/:owner/:repo/releases/tags/:tag
        url = 'https://api.github.com/repos/{}/{}/releases/tags/{}'.format(
            self.repo_owner,
            self.repo,
            tagname
        )
        r = requests.get(url, auth=self.auth)
        if not r.ok:
            raise Exception('Get tag id failed. Requested url: {}'.format(url))
        tag_id = r.json()['id']
        print('Tag id is {}'.format(tag_id))
        return tag_id

    @retry
    def find_asset_id_by_name(self, release_id, name):
        """Return the id of the asset called *name*, or None if absent.

        Pages through the asset list until an empty page is returned.
        """
        # https://developer.github.com/v3/repos/releases/#list-assets-for-a-release
        # GET /repos/:owner/:repo/releases/:id/assets
        page_number = 1
        while True:
            url = 'https://api.github.com/repos/{}/{}/releases/{}/assets?page={}'.format(
                self.repo_owner,
                self.repo,
                release_id,
                page_number
            )
            print('Requesting URL: {}'.format(url))
            r = requests.get(url, auth=self.auth)
            if not r.ok:
                raise Exception(
                    'Getting list of assets failed. Requested url: {}'.format(url))
            # Renamed from `json` so it no longer shadows the json module.
            assets = r.json()
            for asset in assets:
                if name == asset['name']:
                    return asset['id']
            if not assets:
                # Empty page: the asset does not exist.
                return None
            page_number += 1

    @retry
    def delete_asset_by_id(self, asset_id, asset_name):
        """Delete one release asset; *asset_name* is for logging only."""
        # https://developer.github.com/v3/repos/releases/#delete-a-release-asset
        # DELETE /repos/:owner/:repo/releases/assets/:id
        url = 'https://api.github.com/repos/{}/{}/releases/assets/{}'.format(
            self.repo_owner,
            self.repo,
            asset_id
        )
        r = requests.delete(url, auth=self.auth)
        if r.status_code == 204:
            print('Asset removed: {}'.format(asset_name))
        else:
            raise Exception('Deletion of asset failed: {}'.format(asset_name))

    def delete_asset_if_exists(self, release_id, asset_name):
        """Delete the asset named *asset_name* when present (else no-op)."""
        asset_id = self.find_asset_id_by_name(release_id, asset_name)
        if not asset_id:
            print('Asset not exists: {}'.format(asset_name))
            return
        self.delete_asset_by_id(asset_id, asset_name)

    def upload_bzip_once(self, url, local_path):
        """Single attempt to POST *local_path* as a bzip2 asset."""
        headers = {'Content-Type': 'application/x-bzip2'}
        # Context manager closes the handle even on failure (the original
        # leaked the file object).
        with open(local_path, 'rb') as file_to_upload:
            r = requests.post(
                url, data=file_to_upload, headers=headers, auth=self.auth)
        if not r.ok:
            raise Exception('Upload of file failed')

    @retry
    def upload_bzip(self, url, local_path, release_id, asset_name):
        """Upload with retries; a failed attempt removes the partial asset."""
        print('Uploading:\n {}\n -> {}'.format(local_path, url))
        try:
            self.upload_bzip_once(url, local_path)
        except Exception as exc:
            print('Exception catched while uploading, removing asset...')
            self.delete_asset_if_exists(release_id, asset_name)
            raise exc

    def upload_raw_file(self, local_path):
        """Upload *local_path* to the `cache` release, named by its SHA1."""
        tagname = 'cache'
        release_id = self.get_release_by_tag(tagname)
        # https://developer.github.com/v3/repos/releases/#upload-a-release-asset
        # POST https://<upload_url>/repos/:owner/:repo/releases/:id/assets?name=foo.zip
        with open(local_path, 'rb') as f:
            asset_name = hashlib.sha1(f.read()).hexdigest()
        asset_name = asset_name + '.tar.bz2'
        url = 'https://uploads.github.com/repos/{}/{}/releases/{}/assets?name={}'.format(
            self.repo_owner,
            self.repo,
            release_id,
            asset_name
        )
        self.upload_bzip(url, local_path, release_id, asset_name)

    @retry
    def create_new_file(self, local_path, github_path):
        """Create *github_path* in the repo with the content of *local_path*.

        The commit message records the CI environment (Travis/AppVeyor)
        when available.  Returns True on success, False when the request
        failed (e.g. the file already exists); raises on HTTP 409 so the
        retry wrapper tries again.
        """
        # https://developer.github.com/v3/repos/contents/#create-a-file
        # PUT /repos/:owner/:repo/contents/:path
        message = 'Uploading cache info\n\n'
        message += 'Create file: {}\n\n'.format(github_path)
        env_list = []
        job_url = ''
        if os.getenv('TRAVIS') == 'true':
            # * https://docs.travis-ci.com/user/environment-variables/#Default-Environment-Variables
            message += 'Travis:\n'
            job_url = 'https://travis-ci.org/{}/jobs/{}'.format(
                os.getenv('TRAVIS_REPO_SLUG'),
                os.getenv('TRAVIS_JOB_ID')
            )
            env_list += [
                'TRAVIS_BRANCH',
                'TRAVIS_BUILD_ID',
                'TRAVIS_BUILD_NUMBER',
                'TRAVIS_JOB_ID',
                'TRAVIS_JOB_NUMBER',
                'TRAVIS_OS_NAME',
                'TRAVIS_REPO_SLUG'
            ]
        if os.getenv('APPVEYOR') == 'True':
            # * http://www.appveyor.com/docs/environment-variables
            message += 'AppVeyor:\n'
            job_url = 'https://ci.appveyor.com/project/{}/{}/build/{}/job/{}'.format(
                os.getenv('APPVEYOR_ACCOUNT_NAME'),
                os.getenv('APPVEYOR_PROJECT_SLUG'),
                os.getenv('APPVEYOR_BUILD_VERSION'),
                os.getenv('APPVEYOR_JOB_ID')
            )
            env_list += [
                'APPVEYOR_ACCOUNT_NAME',
                'APPVEYOR_PROJECT_ID',
                'APPVEYOR_PROJECT_NAME',
                'APPVEYOR_PROJECT_SLUG',
                'APPVEYOR_BUILD_ID',
                'APPVEYOR_BUILD_NUMBER',
                'APPVEYOR_BUILD_VERSION',
                'APPVEYOR_JOB_ID',
                'APPVEYOR_JOB_NAME',
                'APPVEYOR_REPO_BRANCH'
            ]
        # Store some info about build
        for env_name in env_list:
            env_value = os.getenv(env_name)
            if env_value:
                message += ' {}: {}\n'.format(env_name, env_value)
        if job_url:
            message += '\n Job URL: {}\n'.format(job_url)
        url = 'https://api.github.com/repos/{}/{}/contents/{}'.format(
            self.repo_owner,
            self.repo,
            github_path
        )
        with open(local_path, 'rb') as f:
            content = base64.b64encode(f.read()).decode()
        put_data = {
            'message': message,
            'content': content
        }
        r = requests.put(url, data=json.dumps(put_data), auth=self.auth)
        if not r.ok:
            print('Put failed. Status code: {}'.format(r.status_code))
            if r.status_code == 409:
                raise Exception('Unavailable repository')
        return r.ok
class CacheEntry:
    """One uploadable cache entry, located by its CACHE.DONE marker.

    The meta-directory components are derived purely from the path layout
    (walking up one directory per level):
        <cache>/meta/<toolchain>/<package>[/<component>]/<version>/
            <archive>/<args>/<type>/<internal-deps>/CACHE.DONE

    Fix against the original version: all ``open()`` calls use context
    managers so file handles are not leaked.
    """

    def __init__(self, cache_done_path, cache_dir, temp_dir):
        self.cache_dir = cache_dir
        self.temp_dir = temp_dir
        self.cache_raw = os.path.join(self.cache_dir, 'raw')
        self.cache_meta = os.path.join(self.cache_dir, 'meta')
        self.cache_done_path = cache_done_path
        if not os.path.exists(cache_done_path):
            raise Exception('File not exists: {}'.format(cache_done_path))
        self.cache_done_dir = os.path.dirname(self.cache_done_path)
        self.from_server = os.path.join(self.cache_done_dir, 'from.server')
        self.cache_sha1 = os.path.join(self.cache_done_dir, 'cache.sha1')
        # Walk up the hierarchy, one path component per level.
        self.internal_deps_id = os.path.split(self.cache_done_dir)[0]
        self.type_id = os.path.split(self.internal_deps_id)[0]
        self.args_id = os.path.split(self.type_id)[0]
        self.archive_id = os.path.split(self.args_id)[0]
        self.version = os.path.split(self.archive_id)[0]
        self.component = os.path.split(self.version)[0]
        # Component directories are marked by a '__' prefix; otherwise
        # this level is already the package directory.
        if os.path.split(self.component)[1].startswith('__'):
            self.package = os.path.split(self.component)[0]
        else:
            self.package = self.component
            self.component = ''
        self.toolchain_id = os.path.split(self.package)[0]
        meta = os.path.split(self.toolchain_id)[0]
        assert(meta == self.cache_meta)

    def entry_from_server(self):
        """True when this entry was downloaded from the server."""
        return os.path.exists(self.from_server)

    def upload_raw(self, github):
        """Upload the raw archive named by this entry's cache.sha1 file."""
        with open(self.cache_sha1, 'r') as f:
            sha1 = f.read()
        raw = os.path.join(self.cache_raw, sha1 + '.tar.bz2')
        github.upload_raw_file(raw)

    def upload_meta(self, github, cache_done):
        """Upload meta files from every directory level of this entry."""
        self.upload_files_from_common_dir(github, self.cache_done_dir, cache_done)
        self.upload_files_from_common_dir(github, self.internal_deps_id, cache_done)
        self.upload_files_from_common_dir(github, self.type_id, cache_done)
        self.upload_files_from_common_dir(github, self.args_id, cache_done)
        self.upload_files_from_common_dir(github, self.archive_id, cache_done)
        self.upload_files_from_common_dir(github, self.version, cache_done, check_is_empty=True)
        if self.component != '':
            self.upload_files_from_common_dir(github, self.component, cache_done, check_is_empty=True)
        self.upload_files_from_common_dir(github, self.package, cache_done, check_is_empty=True)
        self.upload_files_from_common_dir(github, self.toolchain_id, cache_done)

    def upload_files_from_common_dir(self, github, dir_path, cache_done, check_is_empty=False):
        """Upload the regular files of *dir_path* to GitHub.

        With cache_done=False only non-DONE files are taken (and the
        expected-emptiness invariants are enforced); with cache_done=True
        only the CACHE.DONE / basic-deps.DONE markers are taken.  Files
        that already exist remotely are verified by re-downloading them
        and comparing SHA1 hashes.
        """
        to_upload = []
        for i in os.listdir(dir_path):
            if i == 'cmake.lock':
                continue
            if i == 'DONE':
                continue
            done_file = (i == 'CACHE.DONE') or (i == 'basic-deps.DONE')
            if done_file and not cache_done:
                continue
            if not done_file and cache_done:
                continue
            i_fullpath = os.path.join(dir_path, i)
            if os.path.isfile(i_fullpath):
                to_upload.append(i_fullpath)
        if not cache_done:
            if check_is_empty and len(to_upload) != 0:
                raise Exception('Expected no files in directory: {}'.format(dir_path))
            if not check_is_empty and len(to_upload) == 0:
                raise Exception('No files found in directory: {}'.format(dir_path))
        for i in to_upload:
            relative_path = i[len(self.cache_meta)+1:]
            relative_unix_path = relative_path.replace('\\', '/') # convert windows path
            expected_download_url = 'https://raw.githubusercontent.com/{}/{}/master/{}'.format(
                github.repo_owner,
                github.repo,
                relative_unix_path
            )
            github_url = 'https://github.com/{}/{}/blob/master/{}'.format(
                github.repo_owner,
                github.repo,
                relative_unix_path
            )
            print('Uploading file: {}'.format(relative_path))
            ok = github.create_new_file(i, relative_unix_path)
            if not ok:
                # Already present remotely: verify the remote content
                # matches the local file byte-for-byte.
                print('Already exist')
                temp_file = os.path.join(self.temp_dir, '__TEMP.FILE')
                download_file(expected_download_url, temp_file, github.auth)
                with open(i, 'rb') as f:
                    expected_content = f.read()
                with open(temp_file, 'rb') as f:
                    downloaded_content = f.read()
                expected_hash = hashlib.sha1(expected_content).hexdigest()
                downloaded_hash = hashlib.sha1(downloaded_content).hexdigest()
                os.remove(temp_file)
                if expected_hash != downloaded_hash:
                    print('Hash mismatch:')
                    print(
                        ' expected {} (content: {})'.format(
                            expected_hash, expected_content
                        )
                    )
                    print(
                        ' downloaded {} (content: {})'.format(
                            downloaded_hash, downloaded_content
                        )
                    )
                    print('GitHub link: {}'.format(github_url))
                    raise Exception('Hash mismatch')
class Cache:
    """The set of CacheEntry objects found under a cache directory."""

    def __init__(self, cache_dir, temp_dir):
        self.entries = self.create_entries(cache_dir, temp_dir)
        self.remove_entries_from_server()
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)

    def create_entries(self, cache_dir, temp_dir):
        """Scan *cache_dir* recursively for CACHE.DONE marker files."""
        print('Searching for CACHE.DONE files in directory:\n {}\n'.format(cache_dir))
        entries = []
        for root, dirs, files in os.walk(cache_dir):
            entries.extend(
                CacheEntry(os.path.join(root, name), cache_dir, temp_dir)
                for name in files
                if name == 'CACHE.DONE'
            )
        print('Found {} files:'.format(len(entries)))
        for entry in entries:
            print(' {}'.format(entry.cache_done_path))
        print('')
        return entries

    def remove_entries_from_server(self):
        """Drop entries that came from the server (from.server marker)."""
        kept = []
        for entry in self.entries:
            if entry.entry_from_server():
                print('Remove entry (from server):\n {}'.format(entry.cache_done_path))
            else:
                kept.append(entry)
        self.entries = kept

    def upload_raw(self, github):
        """Upload the raw archive of every entry."""
        for entry in self.entries:
            entry.upload_raw(github)

    def upload_meta(self, github, cache_done):
        """Upload the meta files of every entry."""
        for entry in self.entries:
            entry.upload_meta(github, cache_done)
# --- command line -----------------------------------------------------------
parser = argparse.ArgumentParser(
    description='Script for uploading Hunter cache files to GitHub'
)
parser.add_argument(
    '--username',
    required=True,
    help='Username'
)
parser.add_argument(
    '--repo-owner',
    required=True,
    help='Repository owner'
)
parser.add_argument(
    '--repo',
    required=True,
    help='Repository name'
)
parser.add_argument(
    '--cache-dir',
    required=True,
    help='Hunter cache directory, e.g. /home/user/.hunter/_Base/Cache'
)
parser.add_argument(
    '--temp-dir',
    required=True,
    help='Temporary directory where files will be downloaded for verification'
)
parser.add_argument(
    '--skip-raw', action='store_true', help="Skip uploading of raw files"
)

args = parser.parse_args()

# --- validate the cache directory -------------------------------------------
cache_dir = os.path.normpath(args.cache_dir)
if not os.path.isdir(cache_dir):
    raise Exception('Not a directory: {}'.format(cache_dir))
if os.path.split(cache_dir)[1] != 'Cache':
    raise Exception('Cache directory path should ends with Cache: {}'.format(cache_dir))

cache = Cache(cache_dir, args.temp_dir)

# The GitHub password comes from the environment, never the command line.
password = os.getenv('GITHUB_USER_PASSWORD')
if password == '' or password is None:
    raise Exception('Expected GITHUB_USER_PASSWORD environment variable')

github = Github(
    username = args.username,
    password = password,
    repo_owner = args.repo_owner,
    repo = args.repo
)

# --- do the upload -----------------------------------------------------------
if args.skip_raw:
    print('*** WARNING *** Skip uploading of raw files')
else:
    cache.upload_raw(github)

cache.upload_meta(github, cache_done=False)
print('Uploading DONE files')
cache.upload_meta(github, cache_done=True) # Should be last
| bsd-2-clause | -5,787,685,296,679,103,000 | 30.706612 | 96 | 0.621856 | false | 3.345542 | false | false | false | 0.00997 |
zvolsky/platby | modules/export_csv.py | 2 | 9784 | #!/usr/bin/env python
# -*- coding: utf8 -*-
u'''
export do csv pro předstírání Jirkovým Společným aktivitám, že načítají csv z banky
exportují se 2 typy záznamů:
"plánované"
- příjem bankou s neznámým ss (možná dočasně, a později i toto budeme zadržovat)
- zatím nepodporován příjem s neznámým ss na místě do pokladny (a asi ani nebude - musí se registrovat)
- manuálně naplánované - k 13.10. sice naprogramováno, ale neodzkoušeno,
a nebudeme to spouštět, nebude-li na to extra tlak.
Pokud by se přece jen dodělávala možnost naplánování částky, nechť nemění
zálohu. Zálohu totiž změní až samotný csv export. Je naprogramován tak,
že zkontroluje stav zálohy v okamžiku exportu, a není-li dost peněz,
částku sníží (k vynulování zálohy) nebo export stopne (při záloze <=0)
"dlužné"
- má-li zůstatek na záloze a zjistí-li se, že Jirkovy aktivity mají pohledávku,
exportuje se pohledávka, snížená o právě exportované plánované
db.systab musí obsahovat (viz fce csv.py: init_systab):
kod: last_csv, hodnota: dd.mm.rrrr posledního exportu
kod: csv_czk , hodnota: nnnnnn.nn zůstatek na účtu
'''
url_zakaznici = 'http://www.spolecneaktivity.cz/administrace/komunity/980eb0fc-3a9d-43b6-8028-59bf28fbb67e/zakaznici'
import os
from datetime import datetime, date, timedelta, time
from time import sleep
from bs4 import BeautifulSoup
from spolecneaktivity_cz import sa_login, unformat_castka
from mz_wkasa_platby import Uc_sa
import vfp
def export_csv(db, app_folder):
rec_last_csv = db(db.systab.kod=='last_csv').select().first()
datum_od = datetime.strptime(rec_last_csv.hodnota, '%d.%m.%Y'
).date()+timedelta(1)
datum_do = date.today()-timedelta(1)
csv_nejpozdeji = datetime.combine(datum_do, time(23,59,59))
if datum_od>datum_do:
print "Od posledního generování musí uplynout alespoň jeden den."
return 0
vypis = ''
sumplus = summinus = 0
evidence = {} # key je auth_user.id, value je celková částka z 379-11 záznamů
pocet, vypis, sumplus, summinus = predej_planovane(
evidence, db, vypis, sumplus, summinus, csv_nejpozdeji)
pocet, vypis, sumplus, summinus = predej_dluzne(
evidence, db, vypis, sumplus, summinus, pocet, csv_nejpozdeji)
make_csv(db, vypis, sumplus, summinus, rec_last_csv, datum_od, datum_do,
app_folder)
return pocet
def predej_planovane(evidence, db, vypis, sumplus, summinus, csv_nejpozdeji):
    u'''Export transfers queued on account 379-11 (planned for SA).

    Planning a transfer must not touch the customer's deposit; the
    deposit is changed only here, by the transfer itself (in __predej1).
    '''
    predat = db((db.pohyb.iddal==Uc_sa.pro_sa) & (db.pohyb.id_pokynu==None)
        ).select()
    pocet = 0
    for predat1 in predat:
        vypis1, sumplus1, summinus1 = __predej1(
            predat1, evidence, db, csv_nejpozdeji)
        vypis += vypis1
        sumplus += sumplus1
        summinus += summinus1
        pocet += 1
    return pocet, vypis, sumplus, summinus
def __predej1(pohyb, evidence, db, csv_nejpozdeji):
    u'''Execute one planned transfer to SA and emit its CSV line.

    Returns (vypis1, sumplus1, summinus1): the CSV fragment plus the
    amount counted into the positive/negative totals.

    Bug fixes against the original version:
    - `castka` was read (for the `evidence` bookkeeping) before it was
      assigned, which raised NameError for any movement with a known
      user; the bookkeeping now happens after the amount is final.
    - the auth_user select lacked the `zaloha` column that is read below.
    '''
    if pohyb.idauth_user:
        zakaznik = db(db.auth_user.id==pohyb.idauth_user).select(
            db.auth_user.id, db.auth_user.ss, db.auth_user.zaloha).first()
        ss = zakaznik.ss or pohyb.ss
    else:
        zakaznik = None
        ss = pohyb.ss
    if pohyb.iddal==Uc_sa.pro_sa:  # transfer to SA planned in advance
        if zakaznik:
            # Never transfer more than the customer's current deposit.
            castka = min(zakaznik.zaloha, pohyb.castka)
            if castka<=0:
                pohyb.update_record(castka=0, id_pokynu="nemá peníze")
                return '', 0, 0  # cancelled: no money on the deposit
            if castka!=pohyb.castka:
                pohyb.update_record(castka=castka)
            zakaznik.update_record(zaloha=zakaznik.zaloha-castka)
        else:
            castka = pohyb.castka
    if zakaznik:
        # Remember how much was transferred for this user; predej_dluzne()
        # subtracts it from the debt found on spolecneaktivity.cz.
        evidence[zakaznik.id] = evidence.get(zakaznik.id, 0) + castka
    id_pohybu = db.pohyb.insert(
        idauth_user=pohyb.idauth_user,
        idma_dati=Uc_sa.pro_sa,
        iddal=Uc_sa.oz_sa,
        datum=datetime.now(),
        castka=castka,
        cislo_uctu=pohyb.cislo_uctu,
        kod_banky=pohyb.kod_banky,
        nazev_banky=pohyb.nazev_banky,
        vs=pohyb.vs,
        ss=ss,
        ks=pohyb.ks,
        id_pokynu=str(pohyb.id)
    )
    pohyb.update_record(id_pokynu=id_pohybu)
    vypis1, sumplus1, summinus1 = __add_csv(pohyb, csv_nejpozdeji)
    # db.commit() happens in the csv.py controller
    return vypis1, sumplus1, summinus1
def predej_dluzne(evidence, db, vypis, sumplus, summinus, pocet,
        csv_nejpozdeji):
    u'''Export debts: customers who owe money on spolecneaktivity.cz and
    still have a positive deposit in wKasa.
    '''
    # jirkovo = nacti_jirkovo_ze_souboru('jirkovo.html')
    # FIXME: credentials are hard-coded in source.
    br = sa_login("Mirek Zv.", "miiirek1+1")
    sleep(2)
    jirkovo = br.open(url_zakaznici).read()
    vfp.strtofile(jirkovo, os.path.join(os.getcwd(),
        'applications', 'platby', 'downloads', 'zakaznici.html'))
    # Slight duplication with controllers/platby.py, which parses this
    # file to obtain the current deposit.
    soup = BeautifulSoup(jirkovo)
    for zakaznik in soup.table('tr'):
        sloupce = zakaznik('td')
        if len(sloupce):  # the header row has no <td>
            planovano = unformat_castka(sloupce[-1].string)
            neuhrazeno = unformat_castka(sloupce[-2].string)
            zaloha = unformat_castka(sloupce[-4].string)
            chybi = planovano + neuhrazeno - zaloha
            if chybi > 0:
                symbol = str(sloupce[0].a.string).strip().lstrip('0')
                wk_zakaznik = db(db.auth_user.ss==symbol).select().first()
                if wk_zakaznik and wk_zakaznik.zaloha > 0:
                    # Subtract what was just paid out in predej_planovane().
                    jeste_chybi = chybi - evidence.get(wk_zakaznik.id, 0)
                    if jeste_chybi:
                        fl_zaloha = float(wk_zakaznik.zaloha)
                        popis = (u'z sa.cz poptával %s Kč' % jeste_chybi
                            ) if (jeste_chybi > fl_zaloha) else ''
                        posleme_mu = min(jeste_chybi, fl_zaloha)
                        id_pohybu = db.pohyb.insert(
                            idauth_user=wk_zakaznik.id,
                            idma_dati=Uc_sa.oz,
                            iddal=Uc_sa.oz_sa,
                            datum=datetime.now(),
                            castka=posleme_mu,
                            ss=symbol,
                            popis=popis
                        )
                        wk_zakaznik.update_record(zaloha=fl_zaloha-posleme_mu)
                        pohyb = db(db.pohyb.id==id_pohybu).select().first()
                        vypis1, sumplus1, summinus1 = __add_csv(
                            pohyb, csv_nejpozdeji)
                        vypis += vypis1
                        sumplus += sumplus1
                        summinus += summinus1
                        # db.commit() happens in the csv.py controller
                        pocet += 1
    return pocet, vypis, sumplus, summinus
def __add_csv(pohyb, csv_nejpozdeji):
    u'''Render one movement as a CSV transaction line.

    Returns (line, plus, minus): the CSV text and the amount counted
    into the positive or the negative total.
    '''
    # Example target line:
    # 0;06.09.2013;85,00;670100-2207318349;6210;;2550;425;PAVEL KUBIŠTA;Bezhotovostní příjem;;;BRE Bank S.A., organizační složka podniku;
    fields = dict(
        # Dates after the exported period are clamped to its last day.
        datum=min(pohyb.datum, csv_nejpozdeji).strftime('%d.%m.%Y'),
        castka=('%0.2f' % pohyb.castka).replace('.',','),
        ucet=pohyb.cislo_uctu or '',
        banka=pohyb.kod_banky or '',
        bhp=u'Bezhotovostní příjem'.encode('cp1250'),
        ks=pohyb.ks or '',
        vs=pohyb.vs or '',
        ss=pohyb.ss or '')
    vypis1 = (
        '0;%(datum)s;%(castka)s;%(ucet)s;%(banka)s;%(ks)s;%(vs)s;%(ss)s;%(ss)s;%(bhp)s;;;banka;\n'
        % fields)
    castka = float(pohyb.castka)
    sumplus1 = castka if castka > 0 else 0.
    summinus1 = castka if castka < 0 else 0.
    return vypis1, sumplus1, summinus1
def make_csv(db, vypis, sumplus, summinus, rec_last_csv, datum_od, datum_do,
        app_folder):
    u'''Fill the CSV template and write it into import_wk/; then persist
    the new balance (csv_czk) and export date (last_csv) in db.systab.
    '''
    maska = vfp.filetostr(os.path.join(os.getcwd(),
        'applications', 'platby', 'others', 'maska.csv'))
    rec_csv_czk = db(db.systab.kod=='csv_czk').select().first()
    vychozi = float(rec_csv_czk.hodnota)
    koncova = vychozi + sumplus + summinus
    obsah = maska % dict(
        nyni=datetime.now().strftime('%d.%m.%Y %H:%M:%S'),
        od=_no_zeros(datum_od),
        do=_no_zeros(datum_do),
        vychozi=_form_castka(vychozi),
        koncova=_form_castka(koncova),
        prijmy=_form_castka(sumplus),
        vydaje=_form_castka(summinus),
        zaznamy=vypis,
        suma=_form_castka(sumplus + summinus)
    )
    vfp.strtofile(obsah, os.path.join(app_folder, 'import_wk',
        datum_od.strftime('%Y_%m%d') + datum_do.strftime('_%m%d') + '.csv'))
    rec_csv_czk.update_record(hodnota=str(koncova))
    rec_last_csv.update_record(hodnota=datum_do.strftime('%d.%m.%Y'))
    # db.commit() happens in the csv.py controller
def _no_zeros(datum):
return datum.strftime('%d.%m.%Y').replace('.0','.').lstrip('0')
def _form_castka(castka):
return ('%0.2f' % castka).replace('.',',')
| agpl-3.0 | 134,352,567,931,373,810 | 44.650485 | 141 | 0.568262 | false | 2.605748 | false | false | false | 0.008741 |
klassenjs/geomoose-js | util/createSprite.py | 3 | 3696 | #!/usr/bin/python
#
# Copyright (c) 2009-2012, Dan "Ducky" Little & GeoMOOSE.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Yay, PIL!!
import Image
import sys
import os
# All of these images are assumed to be 20x20.
# It's fine if they are smaller because they'll just get more padding,
# larger than 20x20 and it'll get a little trickier.
imagesRoot = '../images/';
spriteFolders = ['silk/', 'geosilk/img/'];
try:
os.remove(imageRoot+'all.png')
except:
pass
all_files = list()
for path in spriteFolders:
imgs = os.listdir(imagesRoot+path)
imgs = map(lambda x: imagesRoot+path+x, imgs)
all_files = all_files + imgs
images = list()
for f in all_files:
# this test should be better... but I'm lazy.
if((f.find('.png') > 0 or f.find('.gif') > 0) and f.find('-selected') < 0):
images.append(f)
images.sort()
sprite = Image.new('RGBA', (40,len(images)*30), (0,0,0,0))
i = 0
cssHeader = """
/* Notice:
*
* This file is generated (along with ../images/all.png)
* from the images in images/toolbar by createSprite.py
* in the ../util directory
*/
.sprite-control {
background-image: url('../images/all.png');
background-repeat: no-repeat;
height: 18px; /* nee, < 2.6 20px */
width: 20px;
display: inline-block;
cursor: pointer;
background-position: 0px -%dpx; /* This should default to the 'find' icon */
/* IE hacks for the sprites. */
*zoom: 1;
*display: inline;
}
"""
cssTemplate = """ .sprite-control-%s { background-position: 0px -%dpx; } """
cssSelectedTemplate = """ .sprite-control-%s-selected { background-position: -20px -%dpx !important; } """
cssText = "/*\n" + open('../LICENSE', 'r').read() + '*/\n\n' + cssHeader
height = (len(images)+1)*30+10
findPosition = 0
for image in images:
imagePath = image.split('/')
imageName = imagePath[-1].split('.')[0]
selectedImage = image
for ext in ['gif','png','jpg']:
selectedImage = selectedImage.replace('.'+ext,'-selected.'+ext)
if(not(os.path.isfile(selectedImage))):
selectedImage = image
icon = Image.open(image)
selected_icon = Image.open(selectedImage)
offsetLeft = (20 - icon.size[0]) / 2
offsetHeight = (20 - icon.size[1]) / 2
sprite.paste(icon, (offsetLeft, i*30+10+offsetHeight))
offsetLeft = 20 + (20 - selected_icon.size[0]) / 2
offsetHeight = (20 - selected_icon.size[1]) / 2
sprite.paste(selected_icon, (offsetLeft, i*30+10+offsetHeight))
i+=1
h = height-(height-((i-1)*30))+10
cssText += cssTemplate % (imageName , h)
cssText += cssSelectedTemplate % (imageName, h)
cssText += '\n'
if(imageName == 'find'):
findPosition = h
#print cssTemplate % (imageName , ((i+1)*30+10))
print cssText % findPosition
sprite.save(imagesRoot+'all.png')
| mit | -1,007,491,008,696,859,800 | 28.806452 | 106 | 0.693723 | false | 3.153584 | false | false | false | 0.018669 |
gunthercox/ChatterBot | chatterbot/logic/unit_conversion.py | 1 | 5824 | from chatterbot.logic import LogicAdapter
from chatterbot.conversation import Statement
from chatterbot.exceptions import OptionalDependencyImportError
from chatterbot import languages
from chatterbot import parsing
from mathparse import mathparse
import re
class UnitConversion(LogicAdapter):
"""
The UnitConversion logic adapter parse inputs to convert values
between several metric units.
For example:
User: 'How many meters are in one kilometer?'
Bot: '1000.0'
:kwargs:
* *language* (``object``) --
The language is set to ``chatterbot.languages.ENG`` for English by default.
"""
def __init__(self, chatbot, **kwargs):
    """Set up the pint registry and the question patterns.

    Raises OptionalDependencyImportError when `pint` is not installed.
    """
    super().__init__(chatbot, **kwargs)

    try:
        from pint import UnitRegistry
    except ImportError:
        message = (
            'Unable to import "pint".\n'
            'Please install "pint" before using the UnitConversion logic adapter:\n'
            'pip3 install pint'
        )
        raise OptionalDependencyImportError(message)

    self.language = kwargs.get('language', languages.ENG)
    self.cache = {}

    # Each entry pairs a compiled question pattern with its handler.
    # All three patterns expose the same named groups: 'number', 'from'
    # and 'target'.  re.VERBOSE makes the layout whitespace-insensitive.
    self.patterns = [
        (
            re.compile(r'''
               (([Hh]ow\s+many)\s+
               (?P<target>\S+)\s+ # meter, celsius, hours
               ((are)*\s*in)\s+
               (?P<number>([+-]?\d+(?:\.\d+)?)|(a|an)|(%s[-\s]?)+)\s+
               (?P<from>\S+)\s*) # meter, celsius, hours
            ''' % (parsing.numbers),
                (re.VERBOSE | re.IGNORECASE)
            ),
            lambda m: self.handle_matches(m)
        ),
        (
            re.compile(r'''
               ((?P<number>([+-]?\d+(?:\.\d+)?)|(%s[-\s]?)+)\s+
               (?P<from>\S+)\s+ # meter, celsius, hours
               (to)\s+
               (?P<target>\S+)\s*) # meter, celsius, hours
            ''' % (parsing.numbers),
                (re.VERBOSE | re.IGNORECASE)
            ),
            lambda m: self.handle_matches(m)
        ),
        (
            re.compile(r'''
               ((?P<number>([+-]?\d+(?:\.\d+)?)|(a|an)|(%s[-\s]?)+)\s+
               (?P<from>\S+)\s+ # meter, celsius, hours
               (is|are)\s+
               (how\s+many)*\s+
               (?P<target>\S+)\s*) # meter, celsius, hours
            ''' % (parsing.numbers),
                (re.VERBOSE | re.IGNORECASE)
            ),
            lambda m: self.handle_matches(m)
        )
    ]

    self.unit_registry = UnitRegistry()
def get_unit(self, unit_variations):
"""
Get the first match unit metric object supported by pint library
given a variation of unit metric names (Ex:['HOUR', 'hour']).
:param unit_variations: A list of strings with names of units
:type unit_variations: str
"""
for unit in unit_variations:
try:
return getattr(self.unit_registry, unit)
except Exception:
continue
return None
def get_valid_units(self, from_unit, target_unit):
"""
Returns the first match `pint.unit.Unit` object for from_unit and
target_unit strings from a possible variation of metric unit names
supported by pint library.
:param from_unit: source metric unit
:type from_unit: str
:param from_unit: target metric unit
:type from_unit: str
"""
from_unit_variations = [from_unit.lower(), from_unit.upper()]
target_unit_variations = [target_unit.lower(), target_unit.upper()]
from_unit = self.get_unit(from_unit_variations)
target_unit = self.get_unit(target_unit_variations)
return from_unit, target_unit
def handle_matches(self, match):
"""
Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type: `_sre.SRE_Match`
"""
response = Statement(text='')
from_parsed = match.group("from")
target_parsed = match.group("target")
n_statement = match.group("number")
if n_statement == 'a' or n_statement == 'an':
n_statement = '1.0'
n = mathparse.parse(n_statement, self.language.ISO_639.upper())
from_parsed, target_parsed = self.get_valid_units(from_parsed, target_parsed)
if from_parsed is None or target_parsed is None:
response.confidence = 0.0
else:
from_value = self.unit_registry.Quantity(float(n), from_parsed)
target_value = from_value.to(target_parsed)
response.confidence = 1.0
response.text = str(target_value.magnitude)
return response
def can_process(self, statement):
response = self.process(statement)
self.cache[statement.text] = response
return response.confidence == 1.0
def process(self, statement, additional_response_selection_parameters=None):
response = Statement(text='')
input_text = statement.text
try:
# Use the result cached by the process method if it exists
if input_text in self.cache:
response = self.cache[input_text]
self.cache = {}
return response
for pattern, func in self.patterns:
p = pattern.match(input_text)
if p is not None:
response = func(p)
if response.confidence == 1.0:
break
except Exception:
response.confidence = 0.0
finally:
return response
| bsd-3-clause | -8,994,046,293,773,082,000 | 34.512195 | 88 | 0.527129 | false | 4.217234 | false | false | false | 0.000687 |
python-provy/provy | tests/unit/more/centos/package/test_yum.py | 1 | 8675 | from datetime import datetime, timedelta
import sys
from mock import patch, MagicMock
from nose.tools import istest
from provy.more.centos import YumRole, PackageNotFound
from provy.more.centos.package import yum
from tests.unit.tools.helpers import ProvyTestCase
class YumRoleTest(ProvyTestCase):
def setUp(self):
super(YumRoleTest, self).setUp()
self.role = YumRole(prov=None, context={})
@istest
def installs_necessary_packages_to_provision(self):
with self.mock_role_methods('ensure_up_to_date', 'ensure_package_installed'):
self.role.provision()
self.role.ensure_up_to_date.assert_called_once_with()
self.role.ensure_package_installed.assert_called_once_with('curl')
@istest
def ensures_gpg_key_is_added(self):
with self.execute_mock():
self.role.ensure_gpg_key('http://some.repo')
self.role.execute.assert_called_once_with('curl http://some.repo | rpm --import -', sudo=True, stdout=False)
@istest
def checks_that_repository_exists_in_yum_repos(self):
with self.execute_mock() as execute:
execute.return_value = '''
some
repo
foo-bar
'''
result = self.role.has_source('foo-bar')
self.assertTrue(result)
execute.assert_called_once_with("cat /etc/yum.repos.d/CentOS-Base.repo", sudo=True, stdout=False)
@istest
def checks_that_repository_doesnt_exist_in_apt_source(self):
with self.execute_mock() as execute:
execute.return_value = 'some repo'
result = self.role.has_source('foo-bar')
self.assertFalse(result)
@istest
def ensures_a_source_string_is_added_to_the_repos(self):
source_line = 'foo-bar-repo'
with self.execute_mock() as execute, self.mock_role_method('has_source') as has_source:
has_source.return_value = False
self.assertTrue(self.role.ensure_yum_source(source_line))
self.assertTrue(has_source.called)
execute.assert_called_once_with('echo "{}" >> /etc/yum.repos.d/CentOS-Base.repo'.format(source_line), sudo=True, stdout=False)
@istest
def doesnt_add_source_if_it_already_exists(self):
source_line = 'foo-bar-repo'
with self.execute_mock() as execute, self.mock_role_method('has_source') as has_source:
has_source.return_value = True
self.assertFalse(self.role.ensure_yum_source(source_line))
self.assertFalse(execute.called)
@istest
def gets_update_date_file_as_a_property(self):
with self.mock_role_method('remote_temp_dir'):
self.role.remote_temp_dir.return_value = '/foo/bar'
self.assertEqual(self.role.update_date_file, '/foo/bar/last_yum_update')
@istest
def stores_update_date(self):
with self.mock_role_methods('update_date_file', 'execute'), patch.object(yum, 'datetime') as mock_datetime:
self.role.update_date_file = '/foo/bar'
when = datetime.strptime('2013-01-01', '%Y-%m-%d')
mock_datetime.now.return_value = when
self.role.store_update_date()
self.role.execute.assert_called_once_with('echo "01-01-13 00:00:00" > /foo/bar', stdout=False)
@istest
def gets_last_update_date(self):
with self.mock_role_methods('remote_exists', 'update_date_file', 'read_remote_file'):
self.role.update_date_file = '/foo/bar'
self.role.remote_exists.return_value = True
self.role.read_remote_file.return_value = '01-01-13 00:00:00'
result = self.role.get_last_update_date()
self.assertEqual(result, datetime.strptime('2013-01-01', '%Y-%m-%d'))
self.role.remote_exists.assert_called_once_with(self.role.update_date_file)
self.role.read_remote_file.assert_called_once_with(self.role.update_date_file)
@istest
def gets_none_as_last_update_if_there_was_no_update_yet(self):
with self.mock_role_methods('remote_exists', 'update_date_file', 'read_remote_file'):
self.role.update_date_file = '/foo/bar'
self.role.remote_exists.return_value = False
result = self.role.get_last_update_date()
self.assertIsNone(result)
self.assertFalse(self.role.read_remote_file.called)
@istest
def updates_yum_when_passed_time_limit(self):
with patch.object(yum, 'datetime') as mock_datetime, self.mock_role_methods('get_last_update_date', 'force_update'):
now = datetime.strptime('2013-01-01', '%Y-%m-%d')
then = now - timedelta(minutes=31)
mock_datetime.now.return_value = now
self.role.get_last_update_date.return_value = then
self.role.ensure_up_to_date()
self.role.get_last_update_date.assert_called_once_with()
self.role.force_update.assert_called_once_with()
@istest
def doesnt_update_if_not_passed_from_time_limit(self):
with patch.object(yum, 'datetime') as mock_datetime, self.mock_role_methods('get_last_update_date', 'force_update'):
now = datetime.strptime('2013-01-01', '%Y-%m-%d')
then = now - timedelta(minutes=29)
mock_datetime.now.return_value = now
self.role.get_last_update_date.return_value = then
self.role.ensure_up_to_date()
self.assertFalse(self.role.force_update.called)
@istest
def forces_an_update(self):
with self.mock_role_methods('execute', 'store_update_date'):
self.role.force_update()
self.assertTrue(self.role.context['yum-up-to-date'])
self.role.execute.assert_called_once_with('yum clean all', stdout=False, sudo=True)
self.role.store_update_date.assert_called_once_with()
@istest
def checks_that_a_package_is_installed(self):
with self.execute_mock() as execute:
execute.return_value = '''yes'''
self.assertTrue(self.role.is_package_installed('foo'))
execute.assert_called_once_with('rpm -qa foo', sudo=True, stdout=False)
@istest
def checks_that_a_package_is_not_installed(self):
with self.execute_mock() as execute:
execute.return_value = ''''''
self.assertFalse(self.role.is_package_installed('baz'))
execute.assert_called_once_with('rpm -qa baz', sudo=True, stdout=False)
@istest
def checks_that_a_package_exists(self):
with self.execute_mock() as execute:
self.assertTrue(self.role.package_exists('python'))
execute.assert_called_with('yum info -q python', stdout=False)
@istest
def checks_that_a_package_doesnt_exist(self):
with self.execute_mock() as execute:
execute.return_value = False
self.assertFalse(self.role.package_exists('phyton'))
execute.assert_called_with('yum info -q phyton', stdout=False)
@istest
def traps_sys_exit_when_checking_if_a_package_exists(self):
def exit(*args, **kwargs):
sys.exit(1)
execute = MagicMock(side_effect=exit)
with patch('provy.core.roles.Role.execute', execute):
self.assertFalse(self.role.package_exists('phyton'))
@istest
def checks_if_a_package_exists_before_installing(self):
with self.execute_mock() as execute, self.mock_role_methods('package_exists', 'is_package_installed') as (package_exists, is_package_installed):
is_package_installed.return_value = False
package_exists.return_value = True
result = self.role.ensure_package_installed('python')
self.assertTrue(result)
self.assertTrue(package_exists.called)
execute.assert_called_with('yum install -y python', stdout=False, sudo=True)
@istest
def fails_to_install_package_if_it_doesnt_exist(self):
with self.execute_mock(), self.mock_role_methods('package_exists', 'is_package_installed') as (package_exists, is_package_installed):
is_package_installed.return_value = False
package_exists.return_value = False
self.assertRaises(PackageNotFound, self.role.ensure_package_installed, 'phyton')
self.assertTrue(package_exists.called)
@istest
def doesnt_install_package_if_already_installed(self):
with self.mock_role_method('is_package_installed'):
self.role.is_package_installed.return_value = True
result = self.role.ensure_package_installed('python')
self.assertFalse(result)
| mit | -2,154,357,719,880,740,400 | 38.611872 | 152 | 0.635274 | false | 3.590646 | true | false | false | 0.002651 |
firmlyjin/brython | www/tests/unittests/test/test_genericpath.py | 26 | 12381 | """
Tests common to genericpath, macpath, ntpath and posixpath
"""
import genericpath
import os
import sys
import unittest
import warnings
from test import support
def safe_rmdir(dirname):
try:
os.rmdir(dirname)
except OSError:
pass
class GenericTest:
common_attributes = ['commonprefix', 'getsize', 'getatime', 'getctime',
'getmtime', 'exists', 'isdir', 'isfile']
attributes = []
def test_no_argument(self):
for attr in self.common_attributes + self.attributes:
with self.assertRaises(TypeError):
getattr(self.pathmodule, attr)()
raise self.fail("{}.{}() did not raise a TypeError"
.format(self.pathmodule.__name__, attr))
def test_commonprefix(self):
commonprefix = self.pathmodule.commonprefix
self.assertEqual(
commonprefix([]),
""
)
self.assertEqual(
commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
"/home/swen"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
"/home/swen/"
)
self.assertEqual(
commonprefix(["/home/swen/spam", "/home/swen/spam"]),
"/home/swen/spam"
)
self.assertEqual(
commonprefix(["home:swenson:spam", "home:swen:spam"]),
"home:swen"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:eggs"]),
":home:swen:"
)
self.assertEqual(
commonprefix([":home:swen:spam", ":home:swen:spam"]),
":home:swen:spam"
)
self.assertEqual(
commonprefix([b"/home/swenson/spam", b"/home/swen/spam"]),
b"/home/swen"
)
self.assertEqual(
commonprefix([b"/home/swen/spam", b"/home/swen/eggs"]),
b"/home/swen/"
)
self.assertEqual(
commonprefix([b"/home/swen/spam", b"/home/swen/spam"]),
b"/home/swen/spam"
)
self.assertEqual(
commonprefix([b"home:swenson:spam", b"home:swen:spam"]),
b"home:swen"
)
self.assertEqual(
commonprefix([b":home:swen:spam", b":home:swen:eggs"]),
b":home:swen:"
)
self.assertEqual(
commonprefix([b":home:swen:spam", b":home:swen:spam"]),
b":home:swen:spam"
)
testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd',
'aXc', 'abd', 'ab', 'aX', 'abcX']
for s1 in testlist:
for s2 in testlist:
p = commonprefix([s1, s2])
self.assertTrue(s1.startswith(p))
self.assertTrue(s2.startswith(p))
if s1 != s2:
n = len(p)
self.assertNotEqual(s1[n:n+1], s2[n:n+1])
def test_getsize(self):
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertEqual(self.pathmodule.getsize(support.TESTFN), 3)
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_time(self):
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
f = open(support.TESTFN, "ab")
f.write(b"bar")
f.close()
f = open(support.TESTFN, "rb")
d = f.read()
f.close()
self.assertEqual(d, b"foobar")
self.assertLessEqual(
self.pathmodule.getctime(support.TESTFN),
self.pathmodule.getmtime(support.TESTFN)
)
finally:
if not f.closed:
f.close()
support.unlink(support.TESTFN)
def test_exists(self):
self.assertIs(self.pathmodule.exists(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.exists(support.TESTFN), True)
if not self.pathmodule == genericpath:
self.assertIs(self.pathmodule.lexists(support.TESTFN),
True)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
@unittest.skipUnless(hasattr(os, "pipe"), "requires os.pipe()")
def test_exists_fd(self):
r, w = os.pipe()
try:
self.assertTrue(self.pathmodule.exists(r))
finally:
os.close(r)
os.close(w)
self.assertFalse(self.pathmodule.exists(r))
def test_isdir(self):
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isdir(support.TESTFN), False)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isdir(support.TESTFN), True)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
def test_isfile(self):
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
f = open(support.TESTFN, "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(self.pathmodule.isfile(support.TESTFN), True)
os.remove(support.TESTFN)
os.mkdir(support.TESTFN)
self.assertIs(self.pathmodule.isfile(support.TESTFN), False)
os.rmdir(support.TESTFN)
finally:
if not f.close():
f.close()
support.unlink(support.TESTFN)
safe_rmdir(support.TESTFN)
class TestGenericTest(GenericTest, unittest.TestCase):
# Issue 16852: GenericTest can't inherit from unittest.TestCase
# for test discovery purposes; CommonTest inherits from GenericTest
# and is only meant to be inherited by others.
pathmodule = genericpath
# Following TestCase is not supposed to be run from test_genericpath.
# It is inherited by other test modules (macpath, ntpath, posixpath).
class CommonTest(GenericTest):
common_attributes = GenericTest.common_attributes + [
# Properties
'curdir', 'pardir', 'extsep', 'sep',
'pathsep', 'defpath', 'altsep', 'devnull',
# Methods
'normcase', 'splitdrive', 'expandvars', 'normpath', 'abspath',
'join', 'split', 'splitext', 'isabs', 'basename', 'dirname',
'lexists', 'islink', 'ismount', 'expanduser', 'normpath', 'realpath',
]
def test_normcase(self):
normcase = self.pathmodule.normcase
# check that normcase() is idempotent
for p in ["FoO/./BaR", b"FoO/./BaR"]:
p = normcase(p)
self.assertEqual(p, normcase(p))
self.assertEqual(normcase(''), '')
self.assertEqual(normcase(b''), b'')
# check that normcase raises a TypeError for invalid types
for path in (None, True, 0, 2.5, [], bytearray(b''), {'o','o'}):
self.assertRaises(TypeError, normcase, path)
def test_splitdrive(self):
# splitdrive for non-NT paths
splitdrive = self.pathmodule.splitdrive
self.assertEqual(splitdrive("/foo/bar"), ("", "/foo/bar"))
self.assertEqual(splitdrive("foo:bar"), ("", "foo:bar"))
self.assertEqual(splitdrive(":foo:bar"), ("", ":foo:bar"))
self.assertEqual(splitdrive(b"/foo/bar"), (b"", b"/foo/bar"))
self.assertEqual(splitdrive(b"foo:bar"), (b"", b"foo:bar"))
self.assertEqual(splitdrive(b":foo:bar"), (b"", b":foo:bar"))
def test_expandvars(self):
if self.pathmodule.__name__ == 'macpath':
self.skipTest('macpath.expandvars is a stub')
expandvars = self.pathmodule.expandvars
with support.EnvironmentVarGuard() as env:
env.clear()
env["foo"] = "bar"
env["{foo"] = "baz1"
env["{foo}"] = "baz2"
self.assertEqual(expandvars("foo"), "foo")
self.assertEqual(expandvars("$foo bar"), "bar bar")
self.assertEqual(expandvars("${foo}bar"), "barbar")
self.assertEqual(expandvars("$[foo]bar"), "$[foo]bar")
self.assertEqual(expandvars("$bar bar"), "$bar bar")
self.assertEqual(expandvars("$?bar"), "$?bar")
self.assertEqual(expandvars("${foo}bar"), "barbar")
self.assertEqual(expandvars("$foo}bar"), "bar}bar")
self.assertEqual(expandvars("${foo"), "${foo")
self.assertEqual(expandvars("${{foo}}"), "baz1}")
self.assertEqual(expandvars("$foo$foo"), "barbar")
self.assertEqual(expandvars("$bar$bar"), "$bar$bar")
self.assertEqual(expandvars(b"foo"), b"foo")
self.assertEqual(expandvars(b"$foo bar"), b"bar bar")
self.assertEqual(expandvars(b"${foo}bar"), b"barbar")
self.assertEqual(expandvars(b"$[foo]bar"), b"$[foo]bar")
self.assertEqual(expandvars(b"$bar bar"), b"$bar bar")
self.assertEqual(expandvars(b"$?bar"), b"$?bar")
self.assertEqual(expandvars(b"${foo}bar"), b"barbar")
self.assertEqual(expandvars(b"$foo}bar"), b"bar}bar")
self.assertEqual(expandvars(b"${foo"), b"${foo")
self.assertEqual(expandvars(b"${{foo}}"), b"baz1}")
self.assertEqual(expandvars(b"$foo$foo"), b"barbar")
self.assertEqual(expandvars(b"$bar$bar"), b"$bar$bar")
def test_abspath(self):
self.assertIn("foo", self.pathmodule.abspath("foo"))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIn(b"foo", self.pathmodule.abspath(b"foo"))
# Abspath returns bytes when the arg is bytes
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
for path in (b'', b'foo', b'f\xf2\xf2', b'/foo', b'C:\\'):
self.assertIsInstance(self.pathmodule.abspath(path), bytes)
def test_realpath(self):
self.assertIn("foo", self.pathmodule.realpath("foo"))
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIn(b"foo", self.pathmodule.realpath(b"foo"))
def test_normpath_issue5827(self):
# Make sure normpath preserves unicode
for path in ('', '.', '/', '\\', '///foo/.//bar//'):
self.assertIsInstance(self.pathmodule.normpath(path), str)
def test_abspath_issue3426(self):
# Check that abspath returns unicode when the arg is unicode
# with both ASCII and non-ASCII cwds.
abspath = self.pathmodule.abspath
for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
self.assertIsInstance(abspath(path), str)
unicwd = '\xe7w\xf0'
try:
os.fsencode(unicwd)
except (AttributeError, UnicodeEncodeError):
# FS encoding is probably ASCII
pass
else:
with support.temp_cwd(unicwd):
for path in ('', 'fuu', 'f\xf9\xf9', '/fuu', 'U:\\'):
self.assertIsInstance(abspath(path), str)
def test_nonascii_abspath(self):
if (support.TESTFN_UNDECODABLE
# Mac OS X denies the creation of a directory with an invalid
# UTF-8 name. Windows allows to create a directory with an
# arbitrary bytes name, but fails to enter this directory
# (when the bytes name is used).
and sys.platform not in ('win32', 'darwin')):
name = support.TESTFN_UNDECODABLE
elif support.TESTFN_NONASCII:
name = support.TESTFN_NONASCII
else:
self.skipTest("need support.TESTFN_NONASCII")
# Test non-ASCII, non-UTF8 bytes in the path.
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
with support.temp_cwd(name):
self.test_abspath()
if __name__=="__main__":
unittest.main()
| bsd-3-clause | -2,680,276,137,108,930,600 | 36.18018 | 77 | 0.552298 | false | 3.888505 | true | false | false | 0.000727 |
oma-deeplearning/deeplearning | python/CRBM.py | 1 | 1853 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RBM w/ continuous-valued inputs (Linear Energy)
References :
- Y. Bengio, P. Lamblin, D. Popovici, H. Larochelle: Greedy Layer-Wise
Training of Deep Networks, Advances in Neural Information Processing
Systems 19, 2007
"""
import sys
import numpy
from RBM import RBM
from utils import *
class CRBM(RBM):
def propdown(self, h):
pre_activation = numpy.dot(h, self.W.T) + self.vbias
return pre_activation
def sample_v_given_h(self, h0_sample):
a_h = self.propdown(h0_sample)
en = numpy.exp(-a_h)
ep = numpy.exp(a_h)
v1_mean = 1 / (1 - en) - 1 / a_h
U = numpy.array(self.numpy_rng.uniform(
low=0,
high=1,
size=v1_mean.shape))
v1_sample = numpy.log((1 - U * (1 - ep))) / a_h
return [v1_mean, v1_sample]
def test_crbm(learning_rate=0.1, k=1, training_epochs=1000):
data = numpy.array([[0.4, 0.5, 0.5, 0., 0., 0.],
[0.5, 0.3, 0.5, 0., 0., 0.],
[0.4, 0.5, 0.5, 0., 0., 0.],
[0., 0., 0.5, 0.3, 0.5, 0.],
[0., 0., 0.5, 0.4, 0.5, 0.],
[0., 0., 0.5, 0.5, 0.5, 0.]])
rng = numpy.random.RandomState(123)
# construct CRBM
rbm = CRBM(input=data, n_visible=6, n_hidden=5, numpy_rng=rng)
# train
for epoch in range(training_epochs):
rbm.contrastive_divergence(lr=learning_rate, k=k)
# cost = rbm.get_reconstruction_cross_entropy()
# print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, cost
# test
v = numpy.array([[0.5, 0.5, 0., 0., 0., 0.],
[0., 0., 0., 0.5, 0.5, 0.]])
print(rbm.reconstruct(v))
if __name__ == "__main__":
test_crbm()
| gpl-2.0 | -1,301,735,656,881,664,300 | 24.383562 | 74 | 0.504047 | false | 2.820396 | false | false | false | 0.002698 |
jbedorf/tensorflow | tensorflow/python/tools/freeze_graph_test.py | 3 | 13439 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph freezing tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from tensorflow.core.example import example_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.tools import freeze_graph
from tensorflow.python.training import saver as saver_lib
class FreezeGraphTest(test_util.TensorFlowTestCase):
def _testFreezeGraph(self, saver_write_version):
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# We'll create an input graph that has a single variable containing 1.0,
# and that then multiplies it by 2.
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver(write_version=saver_write_version)
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# We save out the graph to disk, and then call the const conversion
# routine.
input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
input_saver_def_path = ""
input_binary = False
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
clear_devices = False
freeze_graph.freeze_graph(
input_graph_path,
input_saver_def_path,
input_binary,
checkpoint_path,
output_node_names,
restore_op_name,
filename_tensor_name,
output_graph_path,
clear_devices,
"",
"",
"",
checkpoint_version=saver_write_version)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
def _createTFExampleString(self, feature_name, feature_value):
"""Create a serialized tensorflow example."""
example = example_pb2.Example()
example.features.feature[feature_name].float_list.value.extend([
feature_value])
return example.SerializeToString()
def _writeDummySavedModel(self, path, feature_name):
"""Writes a classifier with two input features to the given path."""
with ops.Graph().as_default():
examples = array_ops.placeholder(dtypes.string, name="input_node")
feature_configs = {
feature_name: parsing_ops.FixedLenFeature(shape=[],
dtype=dtypes.float32),
}
features = parsing_ops.parse_example(examples, feature_configs)
feature = features[feature_name]
variable_node = variables.VariableV1(1.0, name="variable_node")
scores = math_ops.multiply(variable_node, feature, name="output_node")
class_feature = array_ops.fill(array_ops.shape(feature),
"class_%s" % feature_name)
classes = array_ops.transpose(class_feature)
with session.Session() as sess:
sess.run(variables.global_variables_initializer())
signature = (
signature_def_utils.classification_signature_def(
examples=examples,
classes=classes,
scores=scores,))
builder = saved_model_builder.SavedModelBuilder(path)
builder.add_meta_graph_and_variables(
sess,
[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
signature,
},)
builder.save(as_text=True)
  @test_util.run_v1_only("b/120545219")
  def testFreezeGraphV1(self):
    # Exercise freezing against a V1-format (legacy) checkpoint.
    self._testFreezeGraph(saver_pb2.SaverDef.V1)
  @test_util.run_v1_only("b/120545219")
  def testFreezeGraphV2(self):
    # Exercise freezing against a V2-format checkpoint.
    self._testFreezeGraph(saver_pb2.SaverDef.V2)
def testFreezeMetaGraph(self):
tmp_dir = self.get_temp_dir()
checkpoint_prefix = os.path.join(tmp_dir, "meta_graph_checkpoint")
checkpoint_state_name = "checkpoint_state"
output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")
with ops.Graph().as_default():
variable_node = variables.VariableV1(1.0, name="variable_node")
output_node = math_ops.multiply(variable_node, 2.0, name="output_node")
sess = session.Session()
init = variables.global_variables_initializer()
sess.run(init)
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
input_saver_def_path = ""
input_binary = True
output_node_names = "output_node"
restore_op_name = "save/restore_all"
filename_tensor_name = "save/Const:0"
clear_devices = False
input_meta_graph = checkpoint_path + ".meta"
freeze_graph.freeze_graph(
"", input_saver_def_path, input_binary, checkpoint_path,
output_node_names, restore_op_name, filename_tensor_name,
output_graph_filename, clear_devices, "", "", "", input_meta_graph)
# Now we make sure the variable is now a constant, and that the graph still
# produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_filename, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(4, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("VariableV2", node.op)
self.assertNotEqual("Variable", node.op)
with session.Session() as sess:
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node)
self.assertNear(2.0, output, 0.00001)
  def testFreezeSavedModel(self):
    """Freezes a graph exported as a SavedModel and checks the result."""
    tmp_dir = self.get_temp_dir()
    saved_model_dir = os.path.join(tmp_dir, "saved_model_dir")
    feature_name = "feature"
    self._writeDummySavedModel(saved_model_dir, feature_name)
    output_graph_filename = os.path.join(tmp_dir, "output_graph.pb")

    # Only the SavedModel dir, output node names and tags carry information
    # here; the remaining freeze_graph arguments are placeholders.
    # NOTE(review): False/None are used interchangeably as "unset" below —
    # presumably freeze_graph only truth-tests them; confirm against the tool.
    input_saved_model_dir = saved_model_dir
    output_node_names = "output_node"
    input_binary = False
    input_saver_def_path = False
    restore_op_name = None
    filename_tensor_name = None
    clear_devices = False
    input_meta_graph = False
    checkpoint_path = None
    input_graph_filename = None
    saved_model_tags = tag_constants.SERVING

    freeze_graph.freeze_graph(input_graph_filename, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_filename, clear_devices, "", "", "",
                              input_meta_graph, input_saved_model_dir,
                              saved_model_tags)

    # Now we make sure the variable is now a constant, and that the graph still
    # produces the expected result.
    with ops.Graph().as_default():
      output_graph_def = graph_pb2.GraphDef()
      with open(output_graph_filename, "rb") as f:
        output_graph_def.ParseFromString(f.read())
        _ = importer.import_graph_def(output_graph_def, name="")

      # No Variable(V2) op may survive freezing.
      self.assertEqual(8, len(output_graph_def.node))
      for node in output_graph_def.node:
        self.assertNotEqual("VariableV2", node.op)
        self.assertNotEqual("Variable", node.op)

      # Feed a serialized tf.Example through the frozen classifier; the dummy
      # model multiplies the feature by the variable's value 1.0.
      feature_value = 2.0
      example = self._createTFExampleString(feature_name, feature_value)
      with session.Session() as sess:
        input_node = sess.graph.get_tensor_by_name("input_node:0")
        output_node = sess.graph.get_tensor_by_name("output_node:0")
        output = sess.run(output_node, feed_dict={input_node: [example]})
        self.assertNear(feature_value, output, 0.00001)
def testSinglePartitionedVariable(self):
"""Ensures partitioned variables fail cleanly with freeze graph."""
checkpoint_prefix = os.path.join(self.get_temp_dir(), "saved_checkpoint")
checkpoint_state_name = "checkpoint_state"
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# Create a graph with partition variables. When weights are partitioned into
# a single partition, the weights variable is followed by a identity ->
# identity (an additional identity node).
partitioner = partitioned_variables.fixed_size_partitioner(1)
with ops.Graph().as_default():
with variable_scope.variable_scope("part", partitioner=partitioner):
batch_size, height, width, depth = 5, 128, 128, 3
input1 = array_ops.zeros(
(batch_size, height, width, depth), name="input1")
input2 = array_ops.zeros(
(batch_size, height, width, depth), name="input2")
num_nodes = depth
filter1 = variable_scope.get_variable("filter", [num_nodes, num_nodes])
filter2 = array_ops.reshape(filter1, [1, 1, num_nodes, num_nodes])
conv = nn.conv2d(
input=input1, filter=filter2, strides=[1, 1, 1, 1], padding="SAME")
node = math_ops.add(conv, input2, name="test/add")
node = nn.relu6(node, name="test/relu6")
# Save graph and checkpoints.
sess = session.Session()
sess.run(variables.global_variables_initializer())
saver = saver_lib.Saver()
checkpoint_path = saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
# Ensure this graph has partition variables.
self.assertTrue([
tensor.name.split(":")[0]
for op in sess.graph.get_operations()
for tensor in op.values()
if re.search(r"/part_\d+/", tensor.name)
])
# Test freezing graph doesn't make it crash.
output_node_names = "save/restore_all"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
return_value = freeze_graph.freeze_graph_with_def_protos(
input_graph_def=sess.graph_def,
input_saver_def=None,
input_checkpoint=checkpoint_path,
output_node_names=output_node_names,
restore_op_name="save/restore_all", # default value
filename_tensor_name="save/Const:0", # default value
output_graph=output_graph_path,
clear_devices=False,
initializer_nodes="")
self.assertTrue(return_value, -1)
if __name__ == "__main__":
test.main()
| apache-2.0 | -5,676,305,373,818,004,000 | 39.357357 | 80 | 0.659424 | false | 3.769705 | true | false | false | 0.004911 |
microelly2/geodata | geodat/navigator.py | 1 | 27716 | '''navigation in 3D'''
# -*- coding: utf-8 -*-
#-------------------------------------------------
#-- event filter next germ + navigator
#--
#-- microelly 2016
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
#http://doc.qt.io/qt-5/qt.html#Key-enum
#http://doc.qt.io/qt-5/qevent.html#Type-enum
#http://doc.qt.io/qt-5/qcolor.html#setNamedColor
#http://doc.qt.io/qt-5/richtext-html-subset.html
from geodat.say import *
import PySide
from PySide import QtGui,QtCore
import FreeCAD,FreeCADGui
#\cond
App=FreeCAD
Err=FreeCAD.Console.PrintError
Msg=FreeCAD.Console.PrintMessage
import FreeCADGui
from PySide import QtGui
from pivy import coin
import sys
from PySide import QtGui, QtCore
import os
#\endcond
import time,sys,traceback,math
from pivy import coin
'''
def sayexc(mess='',last=False):
exc_type, exc_value, exc_traceback = sys.exc_info()
ttt=repr(traceback.format_exception(exc_type, exc_value,exc_traceback))
lls=eval(ttt)
if last:
lls=[lls[-1]]
Err(mess + "\n" +"--> ".join(lls))
'''
# whenever the module is loaded stop an old eventserver
try:
stop()
except:
pass
## the debug window for runtime parameter
def myDebugWidget():
liste=QtGui.QWidget()
liste.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
layout=QtGui.QVBoxLayout()
liste.setLayout(layout)
liste.vmap={}
for k in ['key','xa','ya','za','xr','yr','zr','dxw','dyw','click','clickcount' ]:
line = QtGui.QLineEdit()
line.setText("("+k+")")
layout.addWidget(line)
liste.vmap[k]=line
for k in ['windows']:
line = QtGui.QTextEdit()
line.setText("("+k+")")
layout.addWidget(line)
liste.vmap[k]=line
bt= QtGui.QPushButton()
bt.setText("Ende")
bt.clicked.connect(stop)
layout.addWidget(bt)
liste.show()
return liste
##callback when a key is pressed
def on_key_press(ef,keystring):
print("on_key_press:", keystring)
if keystring=='Escape':
print("stoppe eventserver ...")
ef.output.hide()
stop()
return True
##callback when a key is released
def on_key_release(ef,keystring):
print("on_key_release:", keystring)
return True
## The EventFilter controls the Qt mouse and keybord events
#
class EventFilter(QtCore.QObject):
#\cond
def __init__(self):
QtCore.QObject.__init__(self)
self.lastpos=None
self.on_key_press=on_key_press
self.on_key_release=on_key_release
self.on_move=on_move
self.on_clicks=on_clicks
self.on_windowslist=on_windowslist
self.keyTimeout=0.1
self.keyset=0
self.keyPressed2=False
self.output=myDebugWidget()
self.keymap={}
for t in dir(QtCore.Qt):
if t.startswith( 'Key_' ):
v=eval('QtCore.Qt.'+t)
self.keymap[v]=t[4:]
self.modmap={}
for t in dir(QtCore.Qt):
if t.endswith('Modifier'):
if t!= 'Modifier':
v=eval('QtCore.Qt.'+t)
self.modmap[v]=t[:-8]
#\endcond
## the event handler
#
def eventFilter(self, o, e):
# http://doc.qt.io/qt-5/qevent.html
z=str(e.type())
# not used events
if z == 'PySide.QtCore.QEvent.Type.ChildAdded' or \
z == 'PySide.QtCore.QEvent.Type.ChildRemoved'or \
z == 'PySide.QtCore.QEvent.Type.User' or \
z == 'PySide.QtCore.QEvent.Type.Paint' or \
z == 'PySide.QtCore.QEvent.Type.LayoutRequest' or\
z == 'PySide.QtCore.QEvent.Type.UpdateRequest' :
return QtGui.QWidget.eventFilter(self, o, e)
if z == 'PySide.QtCore.QEvent.Type.KeyPress':
if time.time()-self.keyset<self.keyTimeout:
return True
self.keyPressed2=True
self.keyset=time.time()
ks=''
for k in self.modmap:
if e.modifiers() & k:
ks += self.modmap[k] + '-'
if not self.keymap[e.key()] in ['Shift','Meta','Alt','Control','GroupSwitch']:
ks +=self.keymap[e.key()]
self.output.vmap['key'].setText(ks)
return self.on_key_press(self,ks)
# end of a single key pressed
if z == 'PySide.QtCore.QEvent.Type.KeyRelease':
if self.keyPressed2:
self.keyPressed2=False
self.keyset=0
ks=''
for k in self.modmap:
if e.modifiers() & k:
ks += self.modmap[k] + '-'
ks +=self.keymap[e.key()]
self.output.vmap['key'].setText(ks)
return self.on_key_release(self,ks)
# enter and leave a widget
if z == 'PySide.QtCore.QEvent.Type.Enter' or z == 'PySide.QtCore.QEvent.Type.Leave':
#FreeCAD.Console.PrintMessage("Enter Leave\n")
return True
if z == 'PySide.QtCore.QEvent.Type.HoverMove' :
if False:
FreeCAD.Console.PrintMessage("old Pos: ")
FreeCAD.Console.PrintMessage(e.oldPos())
FreeCAD.Console.PrintMessage(", new Pos: ")
FreeCAD.Console.PrintMessage(e.pos())
FreeCAD.Console.PrintMessage("\n")
self.lastpos=e.pos()
try: za=int(self.output.vmap['za'].text())
except: za=0
za2=za
self.output.vmap['xa'].setText(str(e.pos().x()))
self.output.vmap['ya'].setText(str(e.pos().y()))
#return self.on_move(self,[e.pos().x(),e.pos().y(),za2],[99,99,99])
return self.on_move(self,[e.pos().x(),e.pos().y(),za2],[e.pos().x(),e.pos().y(),0])
try:
if e.type() == QtCore.QEvent.ContextMenu and o.__class__ == QtGui.QWidget:
# hier contextmenue rechte maus auschalten
# FreeCAD.Console.PrintMessage('!! cancel -------------------------------------context-----------\n')
return True
pass
# wheel rotation
if e.type()== QtCore.QEvent.Type.Wheel:
# http://doc.qt.io/qt-4.8/qwheelevent.html
self.output.vmap['xr'].setText(str(e.x()))
self.output.vmap['yr'].setText(str(e.y()))
self.output.vmap['zr'].setText(str(e.delta()))
self.output.vmap['xa'].setText(str(e.globalX()))
self.output.vmap['ya'].setText(str(e.globalY()))
try: za=int(self.output.vmap['za'].text())
except: za=0
za2=za+int(e.delta())
self.output.vmap['za'].setText(str(za2))
return self.on_move(self,[e.globalX(),e.globalY(),za2],[e.x(),e.y(),e.delta()] )
# mouse clicks
if e.type() == QtCore.QEvent.MouseButtonPress or \
e.type() == QtCore.QEvent.MouseButtonRelease or\
e.type() == QtCore.QEvent.MouseButtonDblClick:
windowlist=[]
myclass=o.__class__.__name__
try:
mytext=o.text()
except:
mytext="???"
if myclass=='QTabBar':
windowlist.append([myclass,str(o.tabText(o.currentIndex())),o.currentIndex()])
else:
windowlist.append([myclass,str(mytext)])
self.output.vmap['dxw'].setText(str(o.width()))
self.output.vmap['dyw'].setText(str(o.height()))
widget = QtGui.qApp.widgetAt(self.lastpos)
if widget:
while widget:
try:
p=widget
# Msg("widget "+ p.objectName()+"!\n")
if p.__class__.__name__ =='QMdiSubWindow':
widget=None
# gefunden
# Msg('\n')
label='???'
try:
# Msg( p.__class__.__name__ +" objectName:" + p.objectName()+ "\n" )
label2=p.objectName()
if label2!='': label=label2
except: pass
try:
# Msg( p.__class__.__name__ +" windowTitle" + p.windowTitle()+ "\n" )
label2=p.windowTitle()
if label2!='': label=label2
except: pass
try:
# Msg( p.__class__.__name__ +" tabTExt" + p.tabText()+ "\n" )
label2=p.tabText()
if label2!='': label=label2
except: pass
windowlist.append([p.__class__.__name__ ,str(label)])
p=widget.parent()
widget=p
except:
widget=None
stack=''
for t in windowlist:
stack += str(t)+"\n"
self.output.vmap['xr'].setText(str(e.pos().x()))
self.output.vmap['yr'].setText(str(e.pos().y()))
self.output.vmap['windows'].setText(stack)
self.windowlist=windowlist
self.on_windowslist(self,windowlist)
if e.type() == QtCore.QEvent.MouseButtonRelease:
self.output.vmap['clickcount'].setText('release')
return self.on_clicks(self,'Release',0)
return True
# double clicked
if e.type() == QtCore.QEvent.MouseButtonDblClick and e.button() == QtCore.Qt.LeftButton:
self.output.vmap['click'].setText('left')
self.output.vmap['clickcount'].setText('2')
return True
if e.type() == QtCore.QEvent.MouseButtonDblClick and e.button() == QtCore.Qt.RightButton:
self.output.vmap['click'].setText('right')
self.output.vmap['clickcount'].setText('2')
return True
if e.type() == QtCore.QEvent.MouseButtonDblClick and e.button() == QtCore.Qt.MiddleButton:
self.output.vmap['click'].setText('middle')
self.output.vmap['clickcount'].setText('2')
return True
# middle
if e.button() == QtCore.Qt.MidButton or e.button() == QtCore.Qt.MiddleButton:
self.output.vmap['click'].setText('middle')
self.output.vmap['clickcount'].setText('1')
# kontextmenu abfangen -> return True !
return True
if e.button() == QtCore.Qt.LeftButton:
FreeCAD.Console.PrintMessage('!Mouse one left\n')
self.output.vmap['click'].setText('left')
self.output.vmap['clickcount'].setText('1')
return self.on_clicks(self,'Left',1)
# return True
# right mouse button when context menue deactivated
elif e.button() == QtCore.Qt.RightButton:
self.output.vmap['click'].setText('right')
self.output.vmap['clickcount'].setText('1')
# kontextmenu abfangen -> return True !
return self.on_clicks(self,'Right',1)
# return True
except:
sayexc()
return False
## stop and delete the EventFilter
#
def stop():
mw=QtGui.qApp
ef=FreeCAD.eventfilter
mw.removeEventFilter(ef)
#mw.setOverrideCursor(QtCore.Qt.SizeAllCursor)
mw.setOverrideCursor(QtCore.Qt.ArrowCursor)
# FreeCADGui.activateWorkbench("Geodat")
sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
ef.output.deleteLater()
ef.navi.deleteLater()
sg.removeChild(ef.background)
def keypress(ef,keystring):
camera=FreeCAD.ActiveDocument.Wedge
if keystring=='X':
camera.Placement.Base.x += 10
if keystring=='Y':
camera.Placement.Base.y += 10
if keystring=='Z':
camera.Placement.Base.z += 10
ax=camera.Placement.Rotation.Axis
an=camera.Placement.Rotation.Angle
an=an* 180/math.pi
[y,p,r]=camera.Placement.Rotation.toEuler()
if keystring=='G':
y += 0.1
camera.Placement.Rotation=FreeCAD.Rotation(y,p,r)
if keystring=='H':
p += 0.1
camera.Placement.Rotation=FreeCAD.Rotation(y,p,r)
if keystring=='F':
r += 0.1
camera.Placement.Rotation=FreeCAD.Rotation(y,p,r)
if keystring=='C':
camera.Placement=FreeCAD.Placement()
FreeCAD.activeDocument().recompute()
if keystring=='Escape':
print("stoppe eventserver ...")
ef.output.hide()
stop()
return True
def on_keypress2(ef,keystring):
try:
camera=FreeCADGui.activeDocument().activeView().getCameraNode()
# # Hilfspunkt kameraposition
# c=App.ActiveDocument.Vertex
#
# # Hud
# panel=App.ActiveDocument.Compound
# # Kugel im HUD
# s=App.ActiveDocument.Sphere001
if ef.firstCall:
FreeCADGui.activeDocument().activeView().setCameraType("Perspective")
ef.firstCall=False
campos=FreeCAD.Vector( 0, 0, 0)
camera.position.setValue(campos)
nD=100
fD=12000000
camera.nearDistance.setValue(nD)
camera.farDistance.setValue(fD)
if keystring=='X' or keystring=='Insert':
ef.campos.x += 10
if keystring=='Y'or keystring=='Home' :
ef.campos.y += 10
if keystring=='Z'or keystring=='PageUp':
ef.campos.z += 10
if keystring=='Shift-X'or keystring=='Delete':
ef.campos.x -= 10
if keystring=='Shift-Y'or keystring=='End':
ef.campos.y -= 10
if keystring=='Shift-Z'or keystring=='PageDown':
ef.campos.z -= 10
if keystring=='F12':
ef.campos = FreeCAD.Vector( 0, 0, 0)
ef.laenge=0
ef.breite=0
ef.roll=0
if keystring=='Control-Left':
ef.roll += 10
if keystring=='Control-Right':
ef.roll -= 10
if keystring=='Control-Down':
ef.roll = 0
if ef.mode=='turn':
if keystring=='Up':
ef.breite += 1.0
if keystring=='Down':
ef.breite -= 1.0
if keystring=='Shift-Up' or keystring=='Shift-Down':
ef.breite=-ef.breite
if ef.laenge <=0:
ef.laenge += 180
else:
ef.laenge -= 180
if keystring=='Left':
ef.laenge -= 1.1
if keystring=='Right':
ef.laenge += 1.2
if keystring=='Shift-Left' or keystring=='Shift-Right':
if ef.laenge <=0:
ef.laenge += 180
else:
ef.laenge -= 180
elif ef.mode=='walk':
Msg('walk mode')
if keystring=='Left':
ef.direction -= 0.1
ef.laenge= -90+ef.direction*180/math.pi
if keystring=='Right':
ef.direction += 0.1
ef.laenge= -90+ef.direction*180/math.pi
if keystring=='Up':
ef.campos.x -= ef.speed*math.cos(ef.direction)
ef.campos.y += ef.speed*math.sin(ef.direction)
ef.campos.z += ef.speed*math.sin(ef.breite/180*math.pi)
if keystring=='Down':
ef.campos.x += ef.speed*math.cos(ef.direction)
ef.campos.y -= ef.speed*math.sin(ef.direction)
ef.campos.z -= ef.speed*math.sin(ef.breite/180*math.pi)
if keystring=='Return':
pass
elif ef.mode=='xyz':
Err('xyz mode')
if keystring=='Up':
ef.campos.z += ef.speed*math.cos(math.pi*ef.roll/180)
if keystring=='Down':
ef.campos.z -= ef.speed*math.cos(math.pi*ef.roll/180)
# if keystring=='Up':
# ef.campos.x += ef.speed*math.cos(ef.direction)
# ef.campos.y += ef.speed*math.sin(ef.direction)
# if keystring=='Down':
# ef.campos.x -= ef.speed*math.cos(ef.direction)
# ef.campos.y -= ef.speed*math.sin(ef.direction)
if keystring=='Left':
ef.campos.y += ef.speed*math.sin(0.0+ef.laenge/180*math.pi)
ef.campos.x -= ef.speed*math.cos(0.0+ef.laenge/180*math.pi)
if keystring=='Right':
ef.campos.y -= ef.speed*math.sin(0.0+ef.laenge/180*math.pi)
ef.campos.x += ef.speed*math.cos(0.0+ef.laenge/180*math.pi)
else:
Err("no known mode -- no action")
ef.compass.direction(ef.laenge)
ef.horizon.direction(ef.roll)
ef.horizon.setnick(ef.breite)
r=1000
pos3=FreeCAD.Vector(
r*math.sin(ef.laenge/180*math.pi)*math.cos(ef.breite/180*math.pi),
r*math.cos(ef.laenge/180*math.pi)*math.cos(ef.breite/180*math.pi),
r*math.sin(ef.breite/180*math.pi))
dir=FreeCAD.Vector(pos3)# .sub(ef.campos)
dir.normalize()
print(ef.direction)
print("ef.campos", ef.campos)
ef.map.setPos(ef.campos.x,ef.campos.y,ef.campos.z)
spos=FreeCAD.Vector(ef.campos)
d=200
prpos=FreeCAD.Vector(d*dir.x,d*dir.y,d*dir.z)
ppos=spos.add(prpos)
# kamera position
# c.Placement.Base=ef.campos
camera.position.setValue(ef.campos)
camera.pointAt(coin.SbVec3f(ppos),coin.SbVec3f(0,0.0+math.sin(math.pi*ef.roll/180),0.0+math.cos(math.pi*ef.roll/180)))
print("Roll ", ef.roll)
# #hud
# panel.Placement.Base=ppos
# panel.Placement.Rotation=FreeCAD.Rotation(ef.laenge,-ef.breite,0)
# #drehung des kompass/horizonts
# s.Placement.Rotation=FreeCAD.Rotation(-ef.laenge-90,0,ef.breite)
#
# kamera einstellungen
#
if keystring=='F9':
a=camera.heightAngle.getValue()
a += 0.01
camera.heightAngle.setValue(a)
if keystring=='F10':
a=camera.heightAngle.getValue()
a -= 0.01
camera.heightAngle.setValue(a)
if keystring=='F11':
camera.heightAngle.setValue(0.785398185253)
if keystring=='F5':
nD=camera.nearDistance.getValue()
nD *=1.03
print("near Distance",nD)
camera.nearDistance.setValue(nD)
if keystring=='F6':
nD=camera.nearDistance.getValue()
nD /=1.03
if nD >0:
print("near Distance",nD)
camera.nearDistance.setValue(nD)
if keystring=='F2':
fn='/home/microelly2/FCB/b175_camera_controller/P1170438.JPG'
ef.tex.filename = fn
if keystring=='F3':
fn='/home/microelly2/FCB/b175_camera_controller/P1170039.JPG'
ef.tex.filename = fn
if keystring=='F4':
fn='/home/microelly2/FCB/b175_camera_controller/winter.jpg'
ef.tex.filename = fn
#
# ausgabe daten
#
if 1 or keystring=='F2':
t=FreeCAD.Vector(prpos)
try:
t.normalize()
except:
pass
campos2=(round(ef.campos[0]),round(ef.campos[1]),round(ef.campos[2]))
nD=camera.nearDistance.getValue()
a=camera.heightAngle.getValue()
out=''
out += "camera position " + str(campos2) +"\n"
out += "camera direction " + str([round(t.x,2),round(t.y,2),round(t.z,2)]) + "\n"
out += "speed " + str(ef.speed) +"\n"
out += "dir " + str(round(ef.direction*180/math.pi)) +"\n"
out += '\n'
out += "height Angle " + str(round(a/math.pi*180)) +'\n'
out += "focal length " + str(round(10/math.tan(a/2)))+"\n"
out += "near Distance " + str(round(nD)) + '\n'
print(out)
ef.navi.output.setText(out)
FreeCAD.ActiveDocument.recompute()
FreeCADGui.updateGui()
if keystring=='Escape':
print("stoppe eventserver ...")
stop()
sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
ef.output.deleteLater()
ef.navi.deleteLater()
sg.removeChild(ef.background)
except:
sayexc()
stop()
return True
def on_move(ef,globalVector,localVector):
return True
def on_move2(ef,globalVector,localVector):
if ef.mouseMode:
d=3
if ef.v:
if ef.v[0]>globalVector[0]+d:
ef.on_key_press(ef,"Left")
elif ef.v[0]<globalVector[0]-d:
ef.on_key_press(ef,"Right")
if ef.v[1]>globalVector[1]+d:
ef.on_key_press(ef,"Up")
elif ef.v[1]<globalVector[1]-d:
ef.on_key_press(ef,"Down")
ef.v=globalVector
return True
def on_move3(ef,globalVector,localVector):
return True
## the old click callback
def on_clicks(ef,button,count):
print("on_mouse:", button, str(count))
return True
def on_clicks2(ef,button,count):
print("on_clicks2:", button, str(count))
if button=='Release':
ef.mouseMode=False
if button=='Left':
ef.mouseMode=True
ef.v=None
return True
## click callback for debug
def on_clicks3(ef,button,count):
print("on clicks 3",button)
print(ef.windowlist)
try:
if ef.windowlist[0][1]=='Testme':
print("call HUHU")
return False
except:
return True
## a widget to display the yaw direction inside a circle
class Compass(QtGui.QWidget):
#\cond
def __init__(self):
super(Compass, self).__init__()
self.rect= (0, 0, 100, 100)
self.arc=90
self.resize(150, 150)
#self.update()
#self.initUI()
def initUI(self):
self.setGeometry(300, 300, 350, 100)
self.setWindowTitle('Colors')
#self.show()
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
self.drawRectangles(qp)
qp.end()
def drawRectangles(self, qp):
color = QtGui.QColor(0, 0, 0)
color.setNamedColor('#d4d4d4')
qp.setPen(color)
qp.setBrush(QtGui.QColor(100, 0, 0,50))
qp.drawEllipse(0, 0, 100, 100);
qp.save();
qp.translate(50,50);
qp.rotate(self.arc);
qp.setBrush(QtGui.QColor(255, 0, 0, 255))
qp.drawRect(0, -3, 50, 6);
qp.restore();
def direction(self,arc):
self.arc=arc-90
self.repaint()
#\endcond
## a widget to display the pitch of the view
class Horizon(QtGui.QWidget):
#\cond
def __init__(self):
super(Horizon, self).__init__()
self.rect= (0, 0, 100, 100)
self.arc=0
self.nick=0
self.resize(100, 100)
def initUI(self):
self.setGeometry(300, 300, 350, 100)
self.setWindowTitle('Colors')
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
self.drawRectangles(qp)
qp.end()
def drawRectangles(self, qp):
color = QtGui.QColor(0, 0, 0)
color.setNamedColor('#d4d4d4')
qp.setBrush(QtGui.QColor(100, 100, 100, 255))
qp.drawEllipse(0, 0, 100, 100);
qp.setPen(color)
qp.setBrush(QtGui.QColor(220, 220, 255,200))
rect = QtCore.QRectF(0.0, 0.0, 100.0, 100.0)
startAngle = (90+self.arc-0.5*self.nick) * 16
spanAngle = (self.nick) * 16
qp.drawChord(rect, startAngle, spanAngle)
def direction(self,arc):
self.arc=arc
self.repaint()
def setnick(self,n):
self.nick=-n-180
self.repaint()
#\endcond
## a widget to dispay the xy position of the camera in the scene
class Map(QtGui.QWidget):
def __init__(self):
super(Map, self).__init__()
self.rect= (0, 0, 100, 100)
self.x=50
self.y=50
self.z=50
self.resize(150, 140)
#self.update()
#self.initUI()
def initUI(self):
self.setGeometry(300, 300, 350, 105)
self.setWindowTitle('Colors')
#self.show()
def paintEvent(self, e):
qp = QtGui.QPainter()
qp.begin(self)
self.drawRectangles(qp)
qp.end()
def drawRectangles(self, qp):
color = QtGui.QColor(0, 0, 0)
color.setNamedColor('#d4d4d4')
qp.setPen(color)
qp.setBrush(QtGui.QColor(100, 0, 0,50))
qp.drawRect(0, 0, 105, 105);
qp.save();
qp.translate(self.x,self.y);
qp.setBrush(QtGui.QColor(255, 0, 0, 255))
qp.drawRect(0, 0, 5, 5);
# qp.save();
qp.translate(-self.x,-self.y+self.z);
qp.setBrush(QtGui.QColor(255, 255, 0, 255))
qp.drawRect(0, 0, 10, 5);
qp.restore();
# qp.restore();
def setPos(self,x,y,z):
fak=50.0
self.z=-z/fak+50
self.x=x/fak+50
self.y=-y/fak+50
print("setpos",x,y)
self.repaint()
##creates and returns the navigator display widget
def myNavigatorWidget(ef):
liste=QtGui.QWidget()
liste.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
layout=QtGui.QVBoxLayout()
liste.setLayout(layout)
liste.vmap={}
# for k in ['key','xa','ya','za','xr','yr','zr','dxw','dyw','click','clickcount' ]:
# line = QtGui.QLineEdit()
# line.setText("("+k+")")
# layout.addWidget(line)
# liste.vmap[k]=line
# for k in ['windows']:
# line = QtGui.QTextEdit()
# line.setText("("+k+")")
# layout.addWidget(line)
# liste.vmap[k]=line
# liste.setGeometry(100, 100, 250, 1250)
liste2=QtGui.QWidget()
layout2=QtGui.QHBoxLayout()
liste2.setLayout(layout2)
layout.addWidget(liste2)
liste2.setMinimumHeight(130)
liste2.setMinimumWidth(360)
# drei Anzeiger ...
# compass
ex = Compass()
layout2.addWidget(ex)
ex.direction(-50)
ef.compass=ex
# horizon
ex2 = Horizon()
ex2.setnick(100)
ex2.direction(20)
layout2.addWidget(ex2)
ef.horizon=ex2
# ex2.direction(50)
# speed
ex3 = Map()
layout2.addWidget(ex3)
ex3.setPos(20,40,20)
ef.map=ex3
ll= QtGui.QLabel()
ll.setText("Turn")
layout.addWidget(ll)
liste.modelabel=ll
bt= QtGui.QPushButton()
bt.setText("Walk Mode")
layout.addWidget(bt)
bt= QtGui.QPushButton()
bt.setText("Frontal Mode")
layout.addWidget(bt)
bt= QtGui.QPushButton()
bt.setText("Turn Mode")
layout.addWidget(bt)
line = QtGui.QTextEdit()
line.setText("yyy")
layout.addWidget(line)
liste.output=line
bt= QtGui.QPushButton()
bt.setText("Stop Navigation")
layout.addWidget(bt)
# bt= QtGui.QPushButton()
# bt.setText("Testme")
# layout.addWidget(bt)
# bt.clicked.connect(huhu)
bt= QtGui.QPushButton()
bt.setText("Background 1 Snowland")
layout.addWidget(bt)
bt.clicked.connect(lambda:background1(ef))
bt= QtGui.QPushButton()
bt.setText("Background 2 Duneland")
layout.addWidget(bt)
bt.clicked.connect(lambda:background2(ef))
bt= QtGui.QPushButton()
bt.setText("Background 3 Cologne")
layout.addWidget(bt)
bt.clicked.connect(lambda:background3(ef))
bt= QtGui.QPushButton()
bt.setText("Background 4 Transparence")
layout.addWidget(bt)
bt.clicked.connect(lambda:background4(ef))
liste.ef=ef
liste.show()
return liste
## background image winter
def background1(ef):
fn='/home/microelly2/FCB/b175_camera_controller/winter.jpg'
fn=os.path.dirname(__file__) +"/../pics/winter.jpg"
ef.tex.filename = fn
## background image dune
def background2(ef):
fn='/home/microelly2/FCB/b175_camera_controller/P1170437.JPG'
fn=os.path.dirname(__file__) +"/../pics//P1170437.JPG"
ef.tex.filename = fn
## background image city
def background3(ef):
fn='/home/microelly2/FCB/b175_camera_controller/P1170039.JPG'
fn=os.path.dirname(__file__) +"/../pics/P1170039.JPG"
ef.tex.filename = fn
## background partially transparent
def background4(ef):
fn='/home/microelly2/FCB/b175_camera_controller/transpa.png'
fn=os.path.dirname(__file__) +"/../pics/transpa.png"
ef.tex.filename = fn
def on_windowslist(ef,windowslist):
return True
## callback to set the mode or to do some other useful things
def on_windowslist2(ef,windowslist):
for t in windowslist:
if t==['QPushButton','Stop Navigation']:
stop()
ef.output.deleteLater()
ef.navi.deleteLater()
if t==['QPushButton','Walk Mode']:
print("Walk mode")
ef.mode="walk"
ef.navi.modelabel.setText("Walk")
if t==['QPushButton','Frontal Mode']:
print("Frontal mode")
ef.mode="xyz"
ef.navi.modelabel.setText("Frontal")
if t==['QPushButton','Turn Mode']:
print("Turn mode")
ef.mode="turn"
ef.navi.modelabel.setText("Turn")
return
## initialize and start the Eventfilter
def navi():
'''navigator startup'''
mw=QtGui.qApp
#widget.setCursor(QtCore.Qt.SizeAllCursor)
#cursor ausblenden
#mw.setOverrideCursor(QtCore.Qt.BlankCursor)
# FreeCADGui.activateWorkbench("NoneWorkbench")
mw.setOverrideCursor(QtCore.Qt.PointingHandCursor)
ef=EventFilter()
ef.laenge=0.0
ef.breite=0.0
ef.campos=FreeCAD.Vector( 0, 0, 20000)
# ef.output.hide()
ef.mouseMode=False
ef.firstCall=True
ef.mode="turn"
ef.navi=myNavigatorWidget(ef)
ef.speed=100
ef.direction=0.5*math.pi
ef.roll=0
#--------------
# get a jpg filename
# jpgfilename = QtGui.QFileDialog.getOpenFileName(QtGui.qApp.activeWindow(),'Open image file','*.jpg')
fn='/home/microelly2/FCB/b175_camera_controller/winter.jpg'
fn=os.path.dirname(__file__) +"/../pics/winter.jpg"
sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
col = coin.SoBaseColor()
#col.rgb=(1,0,0)
trans = coin.SoTranslation()
trans.translation.setValue([0,0,0])
myCustomNode = coin.SoSeparator()
#myCustomNode.addChild(col)
if 0 or False:
cub = coin.SoCylinder()
cub.radius.setValue(3000)
cub.height.setValue(4000)
cub.parts.set("SIDES")
s=coin.SoRotationXYZ()
s.angle.setValue(1.5708)
s.axis.setValue(0)
myCustomNode.addChild(s)
s=coin.SoRotationXYZ()
s.angle.setValue(math.pi)
s.axis.setValue(1)
myCustomNode.addChild(s)
else:
cub = coin.SoSphere()
cub.radius.setValue(10000000)
s=coin.SoRotationXYZ()
s.angle.setValue(1.5708)
s.axis.setValue(0)
myCustomNode.addChild(s)
s=coin.SoRotationXYZ()
s.angle.setValue(math.pi)
s.axis.setValue(1)
myCustomNode.addChild(s)
if False:
l=coin.SoDirectionalLight()
l.direction.setValue(coin.SbVec3f(0,1,0))
l.color.setValue(coin.SbColor(0,0,1))
myCustomNode.addChild(l)
l=coin.SoDirectionalLight()
l.direction.setValue(coin.SbVec3f(0,-1,0))
l.color.setValue(coin.SbColor(0,1,1))
myCustomNode.addChild(l)
l=coin.SoDirectionalLight()
l.direction.setValue(coin.SbVec3f(0,0,1))
l.color.setValue(coin.SbColor(1,0,0))
myCustomNode.addChild(l)
l=coin.SoDirectionalLight()
l.direction.setValue(coin.SbVec3f(0,0,-1))
l.color.setValue(coin.SbColor(0.6,0.6,1))
myCustomNode.addChild(l)
l=coin.SoSpotLight()
l.direction.setValue(coin.SbVec3f(1,0,1))
l.color.setValue(coin.SbColor(0,1,0))
l.location.setValue(coin.SbVec3f(0,0,0))
# l.cutOffAngle.setValue(0.01)
# l.dropOffRate.setValue(1)
myCustomNode.addChild(l)
#myCustomNode.addChild(trans)
myCustomNode.addChild(cub)
sg.addChild(myCustomNode)
tex = coin.SoTexture2()
tex.filename = fn
myCustomNode.insertChild(tex,0)
#---------------
ef.background=myCustomNode
ef.tex=tex
FreeCAD.eventfilter=ef
mw.installEventFilter(ef)
FreeCAD.eventfilter.on_key_press=on_keypress2
FreeCAD.eventfilter.on_move=on_move3
FreeCAD.eventfilter.on_clicks=on_clicks3
FreeCAD.eventfilter.on_windowslist=on_windowslist2
on_keypress2(FreeCAD.eventfilter,'O')
view=FreeCADGui.activeDocument().activeView()
FreeCADGui.ActiveDocument.ActiveView.setAnimationEnabled(False)
mgr=view.getViewer().getSoRenderManager()
mgr.setAutoClipping(0)
FreeCAD.ActiveDocument.recompute()
FreeCADGui.updateGui()
return ef
def runtest():
navi()
ef=navi()
ef.navi.hide()
ef.output.hide()
def Navigator():
runtest()
| lgpl-3.0 | 6,674,549,925,316,580,000 | 22.749786 | 120 | 0.65933 | false | 2.614224 | false | false | false | 0.055455 |
kangkot/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/ctypes/test/test_struct_fields.py | 68 | 1507 | import unittest
from ctypes import *
class StructFieldsTestCase(unittest.TestCase):
# Structure/Union classes must get 'finalized' sooner or
# later, when one of these things happen:
#
# 1. _fields_ is set.
# 2. An instance is created.
# 3. The type is used as field of another Structure/Union.
# 4. The type is subclassed
#
# When they are finalized, assigning _fields_ is no longer allowed.
def test_1_A(self):
class X(Structure):
pass
self.failUnlessEqual(sizeof(X), 0) # not finalized
X._fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_1_B(self):
class X(Structure):
_fields_ = [] # finalized
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_2(self):
class X(Structure):
pass
X()
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_3(self):
class X(Structure):
pass
class Y(Structure):
_fields_ = [("x", X)] # finalizes X
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
def test_4(self):
class X(Structure):
pass
class Y(X):
pass
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
Y._fields_ = []
self.assertRaises(AttributeError, setattr, X, "_fields_", [])
if __name__ == "__main__":
unittest.main()
| apache-2.0 | 3,312,128,138,499,278,000 | 29.14 | 71 | 0.568016 | false | 4.095109 | true | false | false | 0.005309 |
tarthy6/dozer-thesis | py/plot.py | 3 | 43753 | # encoding: utf-8
# 2008 © Václav Šmilauer <eudoxos@arcig.cz>
"""
Module containing utility functions for plotting inside woo. Most functionality is exposed through :obj:`woo.core.Plot`, however.
"""
## all exported names
__all__=['live','liveInterval','autozoom','legendAlpha','scientific','scatterMarkerKw']
import sys
PY3K=sys.version_info[0]==3
# Detect whether PIL/Pillow is importable; used for embedding images into plots.
pilOk=False
# Modern Pillow layout: the Image module lives inside the PIL package. The old
# 'import PIL as Image' aliased the *package*, on which Image.open etc. need not exist.
try:
	from PIL import Image
	pilOk=True
except ImportError: pass
if not pilOk:
	# legacy standalone PIL installs expose a top-level Image module
	try:
		import Image
		pilOk=True
	except ImportError: pass
if not pilOk: print('WARN: PIL/Image module (python-imaging) not importable, embedding images into plots will give errors.')
# PY3K
if PY3K:
def _bytes(s): return bytes(s,'ascii')
else:
def _bytes(s): return s
import matplotlib,os,time,math,itertools,sys
# running in batch
#
# If GtkAgg is the default, X must be working, which is not the case
# with batches (DISPLAY is unset in such case) and importing pylab fails then.
#
# Agg does not require the GUI part and works without any DISPLAY active
# just fine.
#
# see http://www.mail-archive.com/woo-dev@lists.launchpad.net/msg04320.html
# and https://lists.launchpad.net/woo-users/msg03289.html
#
# IMPORTANT: this sets woo.runtime.hasDisplay
# Backend selection: importing woo.qt (when importable) sets woo.runtime.hasDisplay.
try: import woo.qt
except ImportError: pass
import woo.runtime, wooMain, woo.config
# batch/headless runs must never touch an X display
if wooMain.options.fakeDisplay: woo.runtime.hasDisplay=False
if 'qt4' not in woo.config.features: woo.runtime.hasDisplay=False
if woo.runtime.hasDisplay is None: # not yet set
	raise RuntimeError('woo.plot imported before woo.runtime.hasDisplay is set. This should not really happen, please report.')
if not woo.runtime.hasDisplay:
	# Agg renders off-screen and works without any DISPLAY (batch mode)
	matplotlib.use('Agg') ## pylab API
else:
	matplotlib.use('Qt4Agg') # pylab API
# the headless canvas is needed even with a display, e.g. for saving figures
from matplotlib.backends.backend_agg import FigureCanvasAgg as _HeadlessFigureCanvas
from minieigen import *
matplotlib.rc('axes',grid=True) # put grid in all figures
import pylab
# simulation-specific bits moved to woo.core.Plot
# so that they are saved and reloaded with Scene automatically
#
# those remain module-global objects
#
live=True if woo.runtime.hasDisplay else False
"Enable/disable live plot updating. Disabled without display (useless)."
liveInterval=.5
"Interval for the live plot updating, in seconds."
autozoom=True
"Enable/disable automatic plot rezooming after data update."
legendAlpha=.6
'Transparency of legend frames in plots'
scientific=True if hasattr(matplotlib.axes.Axes,'ticklabel_format') else False ## safe default for older matplotlib versions
"Use scientific notation for axes ticks."
current=-1
"Point that is being tracked with a scatter point. -1 is for the last point, set to *nan* to disable."
afterCurrentAlpha=.2
"Color alpha value for part of lines after :obj:`woo.plot.current`, between 0 (invisible) to 1 (full color)"
scatterMarkerKw=dict(verts=[(0.,0.),(-30.,10.),(-25,0),(-30.,-10.)],marker=None)
"Parameters for the current position marker"
annotateKw=dict(horizontalalignment='left',verticalalignment='upper right',fontsize=9)
"Parameters for annotation (current value) display"
lineKw=dict(linewidth=1.5,alpha=.8)
"Parameters for the normal line plot"
componentSeparator='_'
componentSuffixes={Vector2:{-1:'norm',0:'x',1:'y'},Vector3:{-1:'norm',0:'x',1:'y',2:'z'},Vector2i:{0:'x',1:'y'},Vector3i:{0:'x',1:'y',2:'z'},Vector6:{-1:'norm',0:'xx',1:'yy',2:'zz',3:'yz',4:'zx',5:'xy'},Matrix3:{(0,0):'xx',(1,1):'yy',(2,2):'zz',(0,1):'xy',(1,0):'yx',(0,2):'xz',(2,0):'zx',(1,2):'yz',(2,1):'zy'}}
# if a type with entry in componentSuffixes is given in addData, columns for individual components are synthesized using indices and suffixes given for each type; negative index means the norm, which is computed using the 'norm()' method (must be defined by the type)
# e.g. foo=Vector3r(1,2,3) will result in columns foo_x=1,foo_y=2,foo_z=3,foo_norm=3.741657...
def Scene_plot_reset(P):
    """Reset all plot-related state of *P*: clear data, plot definitions and
    image data, and close every open matplotlib figure."""
    P.data = {}
    P.plots = {}
    P.imgData = {}
    pylab.close('all')
def Scene_plot_resetData(P):
    """Discard accumulated plot data of *P*; plot definitions and labels are
    left untouched."""
    P.data = {}
def Scene_plot_splitData(P):
    """Make all plots discontinuous at the current point.

    Implemented by calling ``P.addData({})``, which pads every data column
    with a NaN entry."""
    P.addData({})
def Scene_plot_reverseData(P):
    """Reverse woo.core.Plot.data order (each column is reversed in place).

    Useful for tension-compression test, where the initial (zero) state is
    loaded and, to make data continuous, last part must *end* in the zero
    state.
    """
    for column in P.data.values():
        column.reverse()
def addDataColumns(data, dd):
    '''Add new columns filled with NaN to *data*, without adding anything to the
    other columns; names in *dd* that already exist in *data* are left alone.

    :param dict data: mapping of column name to list of values; modified in place.
    :param dd: iterable of column names to add.
    '''
    # all existing columns are kept at equal length; measure any one of them
    # (next(iter(...)) works under both py2 and py3, unlike data.keys()[0])
    numSamples = len(data[next(iter(data))]) if len(data) > 0 else 0
    for d in dd:
        # membership test directly on the dict (no .keys() materialization)
        if d in data: continue
        data[d] = [float('nan')] * numSamples
def Scene_plot_autoData(P,**kw):
    """Add data by evaluating contents of :obj:`woo.core.Plot.plots`. Expressions rasing exceptions will be handled gracefully, but warning is printed for each.

    >>> from woo import plot; from woo.dem import *; from woo.core import *
    >>> from pprint import pprint
    >>> S=Scene(fields=[DemField(gravity=(0,0,-10))])
    >>> S.plot.plots={'S.step':('S.time',None,'numParticles=len(S.dem.par)')}
    >>> S.plot.autoData()
    >>> pprint(S.plot.data)
    {'S.step': [0], 'S.time': [0.0], 'numParticles': [0]}

    Note that each item in :obj:`woo.core.Plot.plots` can be

    * an expression to be evaluated (using the ``eval`` builtin);
    * ``name=expression`` string, where ``name`` will appear as label in plots, and expression will be evaluated each time;
    * a dictionary-like object -- current keys are labels of plots and current values are added to :obj:`woo.core.Plot.data`. The contents of the dictionary can change over time, in which case new lines will be created as necessary.

    A simple simulation with plot can be written in the following way; note how the energy plot is specified.

    >>> from woo import plot, utils
    >>> S=Scene(fields=[DemField(gravity=(0,0,-10))])
    >>> S.plot.plots={'i=S.step':('**S.energy','total energy=S.energy.total()',None,'rel. error=S.energy.relErr()')}
    >>> # we create a simple simulation with one ball falling down
    >>> S.dem.par.add(Sphere.make((0,0,0),1,mat=utils.defaultMaterial()))
    0
    >>> S.engines=[Leapfrog(damping=.4,reset=True),
    ...    # get data required by plots at every step
    ...    PyRunner(1,'S.plot.autoData()')
    ... ]
    >>> S.trackEnergy=True
    >>> S.run(3,True)
    >>> pprint(S.plot.data) #doctest: +ELLIPSIS
    {'grav': [0.0, 0.0, -20.357...],
     'i': [0, 1, 2],
     'kinetic': [0.0, 1.526..., 13.741...],
     'nonviscDamp': [nan, nan, 8.143...],
     'rel. error': [0.0, 1.0, 0.0361...],
     'total energy': [0.0, 1.526..., 1.526...]}

    .. plot::

        import woo, woo.plot, woo.utils
        from woo.dem import *
        from woo.core import *
        S=Scene(fields=[DemField(gravity=(0,0,-10))])
        S.dem.par.add(Sphere.make((0,0,0),1));
        S.engines=[Leapfrog(damping=.4,reset=True),PyRunner('S.plot.autoData()')]
        S.plot.plots={'i=S.step':('**S.energy','total energy=S.energy.total()',None,'rel. error=S.energy.relErr()')}
        S.trackEnergy=True
        S.run(500,True)
        S.plot.legendLoc=('lower left','upper right')
        S.plot.plot()
    """
    def colDictUpdate(col,dic,kw):
        'update *dic* with the value from col, which is a "expr" or "name=expr" string; all exceptions from ``eval`` are caught and warning is printed without adding any data.'
        name,expr=col.split('=',1) if '=' in col else (col,col)
        try:
            val=eval(expr,kw)
            dic.update({name:val})
        except:
            import traceback
            traceback.print_exc()
            print 'WARN: ignoring exception raised while evaluating auto-column `'+expr+"'%s."%('' if name==expr else ' ('+name+')')
    cols={}   # name -> value, accumulated for this call, passed to addData at the end
    S=P.scene
    # data,imgData,plots=P.data,P.imgData,P.plots
    # expressions are evaluated with S (scene) and woo in their globals, plus anything the caller passed
    kw.update(S=S)
    kw.update(woo=woo)
    for p in P.plots:
        pp=P.plots[p]
        # the x-axis specifier is itself an expression (or name=expr)
        colDictUpdate(p.strip(),cols,kw)
        for y in tuplifyYAxis(P.plots[p]):
            # None is the y1/y2 separator (imgplot specifier), nothing to evaluate
            if y==None: continue
            yy=addPointTypeSpecifier(y,noSplit=True)[0]
            yy1=yy.split('=')[-1]
            # dict-like object
            # if hasattr(yy,'keys'): cols.update(dict(yy))
            # '**expr': expression evaluating to a dictionary; its items become columns
            if yy1.startswith('**'):
                try:
                    dd=eval(yy1[2:],{'S':S})
                except:
                    import traceback
                    traceback.print_exc()
                    print 'WARN: ignoring exception raised while evaluating dictionary-returning expression "'+yy1[2:]+':'
                # NOTE(review): if the eval above raised, dd is unbound here and this
                # line raises NameError (or reuses a stale dd) -- looks unintended; confirm
                for k,v in dd.items(): cols[k]=v
            # '*expr': expression evaluating to a sequence of expression strings
            elif yy1.startswith('*'):
                ee=eval(yy1[1:],{'S':S})
                for e in ee: colDictUpdate(e,cols,{'S':S})
            else: colDictUpdate(yy,cols,kw)
    P.addData(cols)
def Scene_plot_addData(P,*d_in,**kw):
    """Add data from arguments name1=value1,name2=value2 to woo.plot.data.
    (the old {'name1':value1,'name2':value2} is deprecated, but still supported)

    New data will be padded with nan's, unspecified data will be nan (nan's don't appear in graphs).
    This way, equal length of all data is assured so that they can be plotted one against any other.

    >>> S=woo.master.scene
    >>> from pprint import pprint
    >>> S.plot.resetData()
    >>> S.plot.addData(a=1)
    >>> S.plot.addData(b=2)
    >>> S.plot.addData(a=3,b=4)
    >>> pprint(S.plot.data)
    {'a': [1, nan, 3], 'b': [nan, 2, 4]}

    Some sequence types can be given to addData; they will be saved in synthesized columns for individual components.

    >>> S.plot.resetData()
    >>> S.plot.addData(c=Vector3(5,6,7),d=Matrix3(8,9,10, 11,12,13, 14,15,16))
    >>> pprint(S.plot.data) #doctest: +ELLIPSIS
    {'c_norm': [10.488...],
     'c_x': [5.0],
     'c_y': [6.0],
     'c_z': [7.0],
     'd_xx': [8.0],
     'd_xy': [9.0],
     'd_xz': [10.0],
     'd_yx': [11.0],
     'd_yy': [12.0],
     'd_yz': [13.0],
     'd_zx': [14.0],
     'd_zy': [15.0],
     'd_zz': [16.0]}
    """
    data,imgData=P.data,P.imgData
    import numpy
    # current column length; all columns are padded to this before appending
    if len(data)>0: numSamples=len(data[data.keys()[0]])
    else: numSamples=0
    # align with imgData, if there is more of them than data
    if len(imgData)>0 and numSamples==0: numSamples=max(numSamples,len(imgData[imgData.keys()[0]]))
    # accept both the deprecated single-dict positional form and keyword form
    d=(d_in[0] if len(d_in)>0 else {})
    d.update(**kw)
    # handle types composed of multiple values (vectors, matrices):
    # replace 'foo' by synthesized per-component columns 'foo_x','foo_y',... (see componentSuffixes)
    dNames=d.keys()[:] # make copy, since dict cannot change size if iterated over directly
    for name in dNames:
        if type(d[name]) in componentSuffixes:
            val=d[name]
            suffixes=componentSuffixes[type(d[name])]
            for ix in suffixes:
                # negative index denotes the norm of the whole vector
                d[name+componentSeparator+suffixes[ix]]=(d[name][ix] if ix>=0 else d[name].norm())
            del d[name]
        elif hasattr(d[name],'__len__'):
            raise ValueError('plot.addData given unhandled sequence type (is a '+type(d[name]).__name__+', must be number or '+'/'.join([k.__name__ for k in componentSuffixes])+')')
    # create new columns for names seen for the first time
    for name in d:
        if not name in data.keys(): data[name]=[]
    # pad every column to numSamples with nan, then append the new value (or nan if not given)
    for name in data:
        data[name]+=(numSamples-len(data[name]))*[nan]
        data[name].append(d[name] if name in d else nan)
    #print [(k,len(data[k])) for k in data.keys()]
    #numpy.array([nan for i in range(numSamples)])
    #numpy.append(data[name],[d[name]],1)
def Scene_plot_addImgData(P,**kw):
    """Add one row of image data (name=filename keywords) to *P.imgData*, keeping
    imgData columns aligned with each other and with *P.data* by repeating last
    values / padding as necessary."""
    data,imgData=P.data,P.imgData
    # create columns for new names first
    for k in kw:
        if k not in imgData: imgData[k]=[]
    # align imgData with data
    if len(data.keys())>0 and len(imgData.keys())>0:
        nData,nImgData=len(data[data.keys()[0]]),len(imgData[imgData.keys()[0]])
        #if nImgData>nData-1: raise RuntimeError("imgData is already the same length as data?")
        if nImgData<nData-1: # repeat last value
            for k in imgData.keys():
                lastValue=imgData[k][-1] if len(imgData[k])>0 else None
                imgData[k]+=(nData-len(imgData[k])-1)*[lastValue]
        elif nData<nImgData:
            for k in data.keys():
                lastValue=data[k][-1] if len(data[k])>0 else nan
                data[k]+=(nImgData-nData)*[lastValue] # add one more, because we will append to imgData below
    # add values from kw
    newLen=(len(imgData[imgData.keys()[0]]) if imgData else 0)+1 # current length plus 1
    for k in kw:
        if k in imgData and len(imgData[k])>0: imgData[k]+=(newLen-len(imgData[k])-1)*[imgData[k][-1]]+[kw[k]] # repeat last element as necessary
        else: imgData[k]=(newLen-1)*[None]+[kw[k]] # repeat None if no previous value
    # align values which were not in kw by repeating the last value
    for k in imgData:
        if len(imgData[k])<newLen: imgData[k]+=(newLen-len(imgData[k]))*[imgData[k][-1]]
    assert len(set([len(i) for i in imgData.values()]))<=1 # no data or all having the same value
# not public functions
def addPointTypeSpecifier(o, noSplit=False):
    """Add point type specifier to simple variable name; optionally take only
    the part before '=' from the first item."""
    if type(o) in (tuple, list):
        # already a (name, spec...) sequence; optionally strip the '=label' part of the name
        if noSplit or type(o[0]) != str:
            return o
        return (o[0].split('=', 1)[0],) + tuple(o[1:])
    # bare item: pair it with an empty line specifier
    name = o if (noSplit or type(o) != str) else o.split('=', 1)[0]
    return (name, '')
def tuplifyYAxis(pp):
    """Return *pp* unchanged when it is already a tuple/list; otherwise wrap it
    in a 1-tuple."""
    return pp if type(pp) in (tuple, list) else (pp,)
def xlateLabel(l, labels):
    """Return the translated label for *l* from the *labels* dict; *l* itself is
    returned when it has no translation entry."""
    # dict.get with default does the membership test and lookup in one step
    return labels.get(l, l)
class LineRef:
    """Holds reference to plot line and to original data arrays (which change
    during the simulation), and updates the actual line using those data upon
    request.

    *line*/*line2* are the artists for data up to / after the module-global
    :obj:`current` index; *scatter* is the current-position marker and
    *annotation* the current-value text (both optional). For image plots,
    *line* is a ``matplotlib.image.AxesImage`` and *xdata* holds filenames.
    """
    def __init__(self,line,scatter,annotation,line2,xdata,ydata,imgData=None,dataName=None):
        self.line,self.scatter,self.annotation,self.line2,self.xdata,self.ydata,self.imgData,self.dataName=line,scatter,annotation,line2,xdata,ydata,imgData,dataName
    def update(self):
        """Refresh the matplotlib artists from the (possibly grown) data arrays."""
        if isinstance(self.line,matplotlib.image.AxesImage):
            # image plot: show the image whose filename is xdata[current]
            try:
                if len(self.xdata)==0 and self.dataName: self.xdata=self.imgData[self.dataName] # empty list reference an empty singleton, not the list we want; adjust here
                import Image
                # no filename yet: use a fully-transparent 1x1 placeholder
                if self.xdata[current]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
                else: img=Image.open(self.xdata[current])
                self.line.set_data(img)
            except IndexError: pass
        else:
            # regular data line
            import numpy
            # current==-1 avoids copy slicing data in the else part
            if current==None or current==-1 or afterCurrentAlpha==1:
                self.line.set_xdata(self.xdata); self.line.set_ydata(self.ydata)
                self.line2.set_xdata([]); self.line2.set_ydata([])
            else:
                # extend the full-color part by one point (if data reach that far)
                # so that line and line2 are visually connected
                preCurrEnd=current+(1 if len(self.xdata)>current else 0)
                self.line.set_xdata(self.xdata[:preCurrEnd]); self.line.set_ydata(self.ydata[:preCurrEnd])
                self.line2.set_xdata(self.xdata[current:]); self.line2.set_ydata(self.ydata[current:])
            try:
                x,y=self.xdata[current],self.ydata[current]
            except IndexError: x,y=0,0
            # this could be written in a nicer way, very likely
            try:
                pt=numpy.ndarray((2,),buffer=numpy.array([float(x),float(y)]))
                if self.scatter:
                    self.scatter.set_offsets(pt)
                    # change rotation of the marker (possibly incorrect)
                    try:
                        dx,dy=self.xdata[current]-self.xdata[current-1],self.ydata[current]-self.ydata[current-1]
                        # there must be an easier way to find on-screen derivative angle, ask on the matplotlib mailing list
                        axes=self.line.get_axes()
                        p=axes.patch; xx,yy=p.get_verts()[:,0],p.get_verts()[:,1]; size=max(xx)-min(xx),max(yy)-min(yy)
                        aspect=(size[1]/size[0])*(1./axes.get_data_ratio())
                        angle=math.atan(aspect*dy/dx)
                        if dx<0: angle-=math.pi
                        self.scatter.set_transform(matplotlib.transforms.Affine2D().rotate(angle))
                    except IndexError: pass
                if self.annotation:
                    if math.isnan(x) or math.isnan(y):
                        # no valid current value: blank the annotation text
                        if hasattr(self.annotation,'xyann'): self.annotation.xyann=(x,y)
                        else: self.annotation.xytext=(0,0)
                        self.annotation.set_text('') # make invisible, place anywhere
                    else:
                        if hasattr(self.annotation,'xyann'): self.annotation.xyann=(x,y) # newer MPL versions (>=1.4)
                        # FIX: older MPL positions the annotation via xytext; the original
                        # set (ineffective) xyann in this branch as well
                        else: self.annotation.xytext=(x,y)
                        self.annotation.set_text(self.annotation.annotateFmt.format(xy=(float(x),float(y))))
            except TypeError: pass # this happens at i386 with empty data, saying TypeError: buffer is too small for requested array
liveTimeStamp=0 # timestamp when live update was started, so that the old thread knows to stop if that changes
nan=float('nan') # shorthand NaN value used for padding missing data
def createPlots(P,subPlots=True,noShow=False,replace=True,scatterSize=60,wider=False):
    '''Create plots based on current data;

    :param subPlots: show all plots in one figure as subplots; otherwise, create multiple figures
    :param noShow: use headless backend for plots, and do not show plots on the screen
    :param replace: do not close existing figures, and do not update P.currLineRefs
    :param scatterSize: size of the current-position scatter marker
    :param wider: swap the computed subplot row/column counts
    :return: list of created figure objects
    '''
    import logging
    data,imgData,plots,labels,xylabels,legendLoc,axesWd,annotateFmt=P.data,P.imgData,P.plots,P.labels,P.xylabels,P.legendLoc,P.axesWd,P.annotateFmt
    if replace:
        if P.currLineRefs:
            logging.info('Closing existing figures')
            ff=set([l.line.get_axes().get_figure() for l in P.currLineRefs]) # get all current figures
            for f in ff: pylab.close(f) # close those
        P.currLineRefs=[]
    figs=[]
    if len(plots)==0: return # nothing to plot
    if subPlots:
        # compute number of rows and colums for plots we have
        subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
        if wider: subRows,subCols=subCols,subRows
    # create a new figure; called once with subPlots, for each subplot without subPlots
    def _newFig():
        ## pylab API
        if not noShow: return pylab.figure() # this will go onto the screen; the pylab call sets up the windows as well
        else: # with noShow
            fig=matplotlib.figure.Figure()
            canvas=_HeadlessFigureCanvas(fig) # canvas must exist for savefig to work
            return fig
    if subPlots: figs=[_newFig()]
    for nPlot,p in enumerate(plots.keys()):
        # x-axis column name, with any '=label' part stripped
        pStrip=p.strip().split('=',1)[0]
        if not subPlots:
            figs.append(_newFig())
            axes=figs[-1].add_subplot(1,1,1)
        else: axes=figs[-1].add_subplot(subRows,subCols,nPlot+1) # nPlot is 1-based in mpl, for matlab comatibility
        axes.grid(True)
        if plots[p]==None: # image plot
            if not pStrip in imgData.keys(): imgData[pStrip]=[]
            # fake (empty) image if no data yet
            import Image
            if len(imgData[pStrip])==0 or imgData[pStrip][-1]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
            else: img=Image.open(imgData[pStrip][-1])
            img=axes.imshow(img,origin='upper')
            if replace: P.currLineRefs.append(LineRef(line=img,scatter=None,annotation=None,line2=None,xdata=imgData[pStrip],ydata=None,imgData=imgData,dataName=pStrip))
            axes.set_axis_off()
            continue
        # normalize y-specifiers to (name, lineSpec) pairs
        plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
        plots_p_y1,plots_p_y2=[],[]; y1=True
        missing=set() # missing data columns
        if pStrip not in data.keys(): missing.add(pStrip.decode('utf-8','ignore'))
        for d in plots_p:
            # None separates y1-axis specifiers from y2-axis specifiers
            if d[0]==None:
                y1=False; continue
            if not isinstance(d[0],(str,unicode)): raise ValueError('Plots specifiers must be strings (not %s)'%(type(d[0]).__name__))
            if y1: plots_p_y1.append(d)
            else: plots_p_y2.append(d)
            try:
                if (
                    d[0] not in data.keys()
                    # and not callable(d[0])
                    and not (isinstance(d[0],(str,unicode)) and (d[0].startswith('**') or d[0].startswith('*'))) # hack for callable as strings
                    # and not hasattr(d[0],'keys')
                ):
                    missing.add(d[0])
            except UnicodeEncodeError:
                import warnings
                # NOTE(review): warnings.error does not exist in the warnings module;
                # this branch would raise AttributeError if ever reached -- confirm
                warnings.error('UnicodeDecodeError when processing data set '+repr(d[0]))
        if missing:
            if len(data.keys())==0 or len(data[data.keys()[0]])==0: # no data at all yet, do not add garbage NaNs
                for m in missing: data[m]=[]
            else:
                addDataColumns(data,missing)
                try:
                    print 'Missing columns in Scene.plot.data, added NaNs:',', '.join([m.encode('utf-8') for m in missing])
                except UnicodeDecodeError:
                    # NOTE(review): 'warnings' may be unbound here (only imported in the
                    # except branch above) -- confirm
                    warnings.warn('UnicodeDecodeError reporting missing data columns -- harmless, just wondering...')
        def createLines(pStrip,ySpecs,axes,isY1=True,y2Exists=False):
            '''Create data lines from specifications; this code is common for y1 and y2 axes;
            it handles y-data specified as callables/dicts passed as string (starting with '*'/'**'), which might create additional lines when updated with liveUpdate.
            '''
            # save the original specifications; they will be smuggled into the axes object
            # the live updated will run yNameFuncs to see if there are new lines to be added
            # and will add them if necessary
            yNameFuncs=set()
            yNames=set()
            ySpecs2=[]
            for ys in ySpecs:
                if not isinstance(ys[0],(str,unicode)): raise ValueError('Plot specifications must be strings (not a %s).'%type(ys[0]))
                if ys[0].startswith('**') or ys[0].startswith('*'):
                    # evaluate the callable/dict expression right away to get current names
                    evEx=eval(ys[0][(2 if ys[0].startswith('**') else 1):],{'S':P.scene})
                    yNameFuncs.add(evEx) # add callable or dictionary
                    # XXX: what is ys[1]? Previously, there was no line specifier there for dicts at least
                    # print evEx,type(evEx), evEx.__iter__(),type(evEx.__iter__())
                    ySpecs2+=[(ret,ys[1]) for ret in evEx] # traverse list or dict keys
                else: ySpecs2.append(ys)
            if len(ySpecs2)==0:
                print 'woo.plot: creating fake plot, since there are no y-data yet'
                line,=axes.plot([nan],[nan])
                line2,=axes.plot([nan],[nan])
                if replace: P.currLineRefs.append(LineRef(line=line,scatter=None,annotation=None,line2=line2,xdata=[nan],ydata=[nan]))
            # set different color series for y1 and y2 so that they are recognizable
            if matplotlib.rcParams.has_key('axes.color_cycle'): matplotlib.rcParams['axes.color_cycle']='b,g,r,c,m,y,k' if not isY1 else 'm,y,k,b,g,r,c'
            for d in ySpecs2:
                yNames.add(d)
                # should have been handled above already
                #if pStrip not in data:
                #	print 'Missing column %s in Scene.plot.data, added NaN.'%pString
                #	addDataColumns(data,[pStrip])
                if d[0] not in data:
                    print 'Missing column %s in Scene.plot.data, added NaN.'%d[0]
                    addDataColumns(data,[d[0]])
                # main (full-color) line for data before `current`
                line,=axes.plot(data[pStrip],data[d[0]],d[1],label=xlateLabel(d[0],P.labels),**lineKw)
                lineKwWithoutAlpha=dict([(k,v) for k,v in lineKw.items() if k!='alpha'])
                # faded continuation line for data after `current`
                line2,=axes.plot([],[],d[1],color=line.get_color(),alpha=afterCurrentAlpha,**lineKwWithoutAlpha)
                # use (0,0) if there are no data yet
                scatterPt=[0,0] if len(data[pStrip])==0 else (data[pStrip][current],data[d[0]][current])
                scatterPtPos=[scatterPt[0] if not math.isnan(scatterPt[0]) else 0,scatterPt[1] if not math.isnan(scatterPt[1]) else 0]
                # if current value is NaN, use zero instead
                scatter=axes.scatter(scatterPtPos[0],scatterPtPos[1],s=scatterSize,color=line.get_color(),**scatterMarkerKw)
                if annotateFmt:
                    if math.isnan(scatterPtPos[0]) or math.isnan(scatterPtPos[1]): text=''
                    else: text=annotateFmt.format(xy=scatterPt)
                    annotation=axes.annotate(text,xy=scatterPtPos,color=line.get_color(),**annotateKw)
                    annotation.annotateFmt=annotateFmt
                else: annotation=None
                if replace: P.currLineRefs.append(LineRef(line=line,scatter=scatter,annotation=annotation,line2=line2,xdata=data[pStrip],ydata=data[d[0]]))
            axes=line.get_axes()
            labelLoc=(legendLoc[0 if isY1 else 1] if y2Exists>0 else 'best')
            l=axes.legend(loc=labelLoc)
            if l:
                l.get_frame().set_alpha(legendAlpha)
                if hasattr(l,'draggable'): l.draggable(True)
            if scientific:
                axes.ticklabel_format(style='sci',scilimits=(0,0),axis='both')
                # fixes scientific exponent placement for y2: https://sourceforge.net/mailarchive/forum.php?thread_name=20101223174750.GD28779%40ykcyc&forum_name=matplotlib-users
                if not isY1: axes.yaxis.set_offset_position('right')
            if isY1:
                axes.set_ylabel((', '.join([xlateLabel(_p[0],P.labels) for _p in ySpecs2])) if p not in xylabels or not xylabels[p][1] else xylabels[p][1])
                axes.set_xlabel(xlateLabel(pStrip,P.labels) if (p not in xylabels or not xylabels[p][0]) else xylabels[p][0])
            else:
                axes.set_ylabel((', '.join([xlateLabel(_p[0],P.labels) for _p in ySpecs2])) if (p not in xylabels or len(xylabels[p])<3 or not xylabels[p][2]) else xylabels[p][2])
            # if there are callable/dict ySpecs, save them inside the axes object, so that the live updater can use those
            if yNameFuncs:
                axes.wooYNames,axes.wooYFuncs,axes.wooXName,axes.wooLabelLoc=yNames,yNameFuncs,pStrip,labelLoc # prepend woo to avoid clashes
            if 0:
                # fix missing 'show' method; this has been fixed in matplotlib already, but we need to backport that
                # see https://github.com/matplotlib/matplotlib/commit/15fd0ae587a57cb1d7b69546eb359085315148c8
                # don't do that for headless backend, error there is fine
                fig=axes.get_figure()
                if not hasattr(fig,'show'):
                    mgr=getattr(fig.canvas,'manager')
                    if mgr: fig.show=lambda *args: mgr.window.show()
        createLines(pStrip,plots_p_y1,axes=axes,isY1=True,y2Exists=len(plots_p_y2)>0)
        if axesWd>0:
            axes.axhline(linewidth=axesWd,color='k')
            axes.axvline(linewidth=axesWd,color='k')
        # create y2 lines, if any
        if len(plots_p_y2)>0:
            axes=axes.twinx() # create the y2 axis
            createLines(pStrip,plots_p_y2,axes,isY1=False,y2Exists=True)
        ### scene is not directly accessible from here, do it like this:
        S=woo.master.scene
        if S.plot==P:
            if 'title' in S.tags: axes.set_title(S.tags['title'])
    return figs
def liveUpdate(P,timestamp):
    """Periodically refresh all figures of *P* from its data, running until
    :obj:`live` is disabled or another live-update thread is started (detected
    via the module-global :obj:`liveTimeStamp` changing away from *timestamp*).
    Intended to run in a background thread (see Scene_plot_plot)."""
    global liveTimeStamp
    liveTimeStamp=timestamp
    import sys
    while True:
        # stop when live plotting is off or when a newer updater took over
        if not live or liveTimeStamp!=timestamp:
            return
        figs,axes,linesData=set(),set(),set()
        data=P.data
        for l in P.currLineRefs:
            l.update()
            figs.add(l.line.get_figure())
            axes.add(l.line.get_axes())
            linesData.add(id(l.ydata))
        # find callables in y specifiers, create new lines if necessary
        for ax in axes:
            if not hasattr(ax,'wooYFuncs') or not ax.wooYFuncs: continue # not defined of empty
            yy=set();
            for f in ax.wooYFuncs:
                if callable(f): yy.update(f())
                elif hasattr(f,'keys'):
                    yy.update(f.keys())
                else: raise ValueError("Internal error: ax.wooYFuncs items must be callables or dictionary-like objects and nothing else.")
            #print 'callables y names:',yy
            news=yy-ax.wooYNames
            if not news: continue
            for new in news:
                ax.wooYNames.add(new)
                if new in data.keys() and id(data[new]) in linesData: continue # do not add when reloaded and the old lines are already there
                print 'woo.plot: creating new line for',new
                if not new in data.keys(): data[new]=len(data[ax.wooXName])*[nan] # create data entry if necessary
                #print 'data',len(data[ax.wooXName]),len(data[new]),data[ax.wooXName],data[new]
                line,=ax.plot(data[ax.wooXName],data[new],label=xlateLabel(new,P.labels)) # no line specifier
                line2,=ax.plot([],[],color=line.get_color(),alpha=afterCurrentAlpha)
                # current-position marker; fall back to 0 where the current value is missing/NaN
                scatterPt=(0 if len(data[ax.wooXName])==0 or math.isnan(data[ax.wooXName][current]) else data[ax.wooXName][current]),(0 if len(data[new])==0 or math.isnan(data[new][current]) else data[new][current])
                scatter=ax.scatter(scatterPt[0],scatterPt[1],s=60,color=line.get_color(),**scatterMarkerKw)
                if P.annotateFmt:
                    annotation=ax.annotate(P.annotateFmt.format(xy=scatterPt),xy=scatterPt,color=line.get_color(),**annotateKw)
                    annotation.annotateFmt=P.annotateFmt
                else: annotation=None
                P.currLineRefs.append(LineRef(line=line,scatter=scatter,annotation=annotation,line2=line2,xdata=data[ax.wooXName],ydata=data[new]))
                ax.set_ylabel(ax.get_ylabel()+(', ' if ax.get_ylabel() else '')+xlateLabel(new,P.labels))
            # it is possible that the legend has not yet been created
            l=ax.legend(loc=ax.wooLabelLoc)
            if l:
                l.get_frame().set_alpha(legendAlpha)
                if hasattr(l,'draggable'): l.draggable(True)
        if autozoom:
            for ax in axes:
                try:
                    ax.relim() # recompute axes limits
                    ax.autoscale_view()
                except RuntimeError: pass # happens if data are being updated and have not the same dimension at the very moment
        for fig in figs:
            #sys.stderr.write('*')
            try:
                fig.canvas.draw()
            except RuntimeError: pass # happens here too
            #sys.stderr.write('(')
        time.sleep(liveInterval)
        #sys.stderr.write(')')
def savePlotSequence(P,fileBase,stride=1,imgRatio=(5,7),title=None,titleFrames=20,lastFrames=30):
    '''Save sequence of plots, each plot corresponding to one line in history. It is especially meant to be used for :obj:`woo.utils.makeVideo`.

    :param stride: only consider every stride-th line of history (default creates one frame per each line)
    :param title: Create title frame, where lines of title are separated with newlines (``\\n``) and optional subtitle is separated from title by double newline.
    :param int titleFrames: Create this number of frames with title (by repeating its filename), determines how long the title will stand in the movie.
    :param int lastFrames: Repeat the last frame this number of times, so that the movie does not end abruptly.
    :return: List of filenames with consecutive frames.
    '''
    data,imgData,plots=P.data,P.imgData,P.plots
    # render everything into one headless figure with subplots
    fig=createPlots(P,noShow=True,replace=True,subPlots=True,scatterSize=60,wider=True)[0]
    sqrtFigs=math.sqrt(len(plots))
    fig.set_size_inches(8*sqrtFigs,5*sqrtFigs) # better readable
    fig.subplots_adjust(left=.05,right=.95,bottom=.05,top=.95) # make it more compact
    if len(plots)==1 and plots[plots.keys()[0]]==None: # only pure snapshot is there
        fig.set_size_inches(5,5)
        fig.subplots_adjust(left=0,right=1,bottom=0,top=1)
    #if not data.keys(): raise ValueError("plot.data is empty.")
    pltLen=max(len(data[data.keys()[0]]) if data else 0,len(imgData[imgData.keys()[0]]) if imgData else 0)
    if pltLen==0: raise ValueError("Both plot.data and plot.imgData are empty.")
    # the module-global `current` index drives which point each LineRef shows
    global current
    ret=[]
    print 'Saving %d plot frames, it can take a while...'%(pltLen)
    for i,n in enumerate(range(0,pltLen,stride)):
        current=n
        for l in P.currLineRefs: l.update()
        out=fileBase+'-%03d.png'%i
        fig.savefig(out)
        ret.append(out)
        sys.stderr.write('[%d]'%i)
    if len(ret)==0: raise RuntimeError("No images created?!")
    if title:
        import Image
        titleImgName=fileBase+'-title.png'
        # title frame has the same pixel size as the data frames
        createTitleFrame(titleImgName,Image.open(ret[-1]).size,title)
        ret=titleFrames*[titleImgName]+ret
    if lastFrames>1: ret+=(lastFrames-1)*[ret[-1]]
    return ret
def createTitleFrame(out,size,title,bgColor=(.8,.6,.8),fgColor='#405090',logo=None,logoPos=(20,20)):
    '''Create figure with title and save to file.

    :param out: file to save the result to; format is anything supported by matplotlib.
    :param size: figure size (for pixel output formats), tuple of (width,height)
    :param str title: title and subtitle; lines are separated by single newlines (``\\n``) and subtitle (if any) is separated from the title by two consecutive newlines (``\\n\\n``). Oversize lines are scaled to fit the width, line spacing fits all lines.
    :param color fgColor: Font color, any `color format that Matplotlib understands <http://matplotlib.org/api/colors_api.html>`__.
    :param color bgColor: Background color.
    :param logo: filename or file-like object to be read via `matplotlib.pyploy.imread <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imread>`__.
    :param logoPos: position where to place the logo.
    '''
    import matplotlib, matplotlib.figure, matplotlib.mathtext
    # http://stackoverflow.com/a/13714720/761090
    dpi=100 # does not matter as font is specified in inches
    fig=matplotlib.figure.Figure(figsize=(size[0]/dpi,size[1]/dpi),dpi=dpi,facecolor=bgColor)
    canvas=_HeadlessFigureCanvas(fig)
    #fig.set_facecolor('blue'); fig.patch.set_color('blue'); fig.patch.set_facecolor('blue'); fig.patch.set_alpha(None)
    titSub=title.split('\n\n')
    # FIX: the original branched on len(titSub)==1 with a two-name unpacking
    # (title,subtitle=titSub), which raised ValueError for any title without a
    # subtitle; a single part simply means there is no subtitle.
    if len(titSub)==1: subtitle=''
    else: title,subtitle=titSub[0],'\n'.join(titSub[1:])
    # (line text, isTitle) pairs; title lines use the bigger font
    lines=[(t,True) for t in title.split('\n')]+([(t,False) for t in subtitle.split('\n')] if subtitle else [])
    fontSizes=size[1]/10.,size[1]/16. # (title, subtitle) font sizes in pixels
    def writeLine(text,vertPos,fontsize):
        # render text via mathtext and paste the raster into the figure, centered horizontally;
        # oversize lines are re-rendered with a proportionally smaller font
        rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize,dpi=fig.get_dpi(),color=fgColor)
        textsize=rgba.shape[1],rgba.shape[0]
        if textsize[0]>size[0]:
            rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize*size[0]/textsize[0],dpi=fig.get_dpi(),color=fgColor)
            textsize=rgba.shape[1],rgba.shape[0]
        fig.figimage(rgba.astype(float)/255.,xo=(size[0]-textsize[0])/2.,yo=vertPos-depth)
    nTitle,nSubtitle=len(title.split('\n')),len(subtitle.split('\n')) if subtitle else 0
    # vertical layout: subtitle lines are packed at .6 of the title line spacing,
    # with an extra half-step gap between title and subtitle
    ht=size[1]; y0=ht-2*fontSizes[0]; yStep=(ht-2.5*fontSizes[0])/(nTitle+.6*nSubtitle+(.5 if nSubtitle else 0))
    def lineYOffset(lineno):
        # FIX: use the parameter; the original closed over the loop variable `i`
        # (same value at every call site, but broken as a standalone function)
        return nTitle*yStep+.5*yStep+(lineno-nTitle)*.6*yStep if lineno>=nTitle else lineno*yStep
    if logo:
        import matplotlib.pylab
        logoData=pylab.imread(logo)
        fig.figimage(logoData,xo=logoPos[0],yo=logoPos[1],origin='upper')
    for i,(l,isTitle) in enumerate(lines):
        writeLine(l,y0-lineYOffset(i),fontSizes[0 if isTitle else 1])
    # http://stackoverflow.com/a/4805178/761090 - savefig default overrides facecolor set previously
    fig.savefig(out,facecolor=fig.get_facecolor())
def Scene_plot_plot(P,noShow=False,subPlots=True):
    """Do the actual plot, which is either shown on screen (and nothing is returned: if *noShow* is ``False``) or, if *noShow* is ``True``, returned list of matplotlib's Figure objects.

    You can use

    >>> import woo,woo.core,os
    >>> S=woo.core.Scene()
    >>> S.plot.plots={'foo':('bar',)}
    >>> S.plot.addData(foo=1,bar=2)
    >>> somePdf=woo.master.tmpFilename()+'.pdf'
    >>> S.plot.plot(noShow=True)[0].savefig(somePdf)
    >>> os.path.exists(somePdf)
    True

    to save the figure to file automatically.
    """
    figs=createPlots(P,subPlots=subPlots,noShow=noShow,replace=(False if noShow else True))
    # figs=set([l.line.get_axes().get_figure() for l in P.currLineRefs])
    if not figs:
        import warnings
        warnings.warn('Nothing to plot.')
        return
    if not hasattr(list(figs)[0],'show') and not noShow:
        import warnings
        warnings.warn('plot.plot not showing figure (matplotlib using headless backend?)')
        noShow=True # headless backend cannot show figures on screen
    if not noShow:
        if not woo.runtime.hasDisplay: return # would error out with some backends, such as Agg used in batches
        if 1:
            if live:
                # background thread keeps refreshing the figures from plot data
                import threading
                t=threading.Thread(target=liveUpdate,args=(P,time.time()))
                t.daemon=True
                t.start()
            # pylab.show() # this blocks for some reason; call show on figures directly
            for f in figs:
                f.show()
                # should have fixed https://bugs.launchpad.net/woo/+bug/606220, but does not work apparently
                if 0:
                    import matplotlib.backend_bases
                    if 'CloseEvent' in dir(matplotlib.backend_bases):
                        def closeFigureCallback(event):
                            ff=event.canvas.figure
                            # remove closed axes from our update list
                            P.currLineRefs=[l for l in P.currLineRefs if l.line.get_axes().get_figure()!=ff]
                        f.canvas.mpl_connect('close_event',closeFigureCallback)
    # else:
    # 	figs=list(set([l.line.get_axes().get_figure() for l in P.currLineRefs]))
    return figs
def Scene_plot_saveDataTxt(P,fileName,vars=None):
"""Save plot data into a (optionally compressed) text file. The first line contains a comment (starting with ``#``) giving variable name for each of the columns. This format is suitable for being loaded for further processing (outside woo) with ``numpy.genfromtxt`` function, which recognizes those variable names (creating numpy array with named entries) and handles decompression transparently.
>>> import woo, woo.core
>>> from pprint import pprint
>>> S=woo.core.Scene()
>>> S.plot.addData(a=1,b=11,c=21,d=31) # add some data here
>>> S.plot.addData(a=2,b=12,c=22,d=32)
>>> pprint(S.plot.data)
{'a': [1, 2], 'b': [11, 12], 'c': [21, 22], 'd': [31, 32]}
>>> txt=woo.master.tmpFilename()+'.txt.bz2'
>>> S.plot.saveDataTxt(txt,vars=('a','b','c'))
>>> import numpy
>>> d=numpy.genfromtxt(txt,dtype=None,names=True)
>>> d['a']
array([1, 2])
>>> d['b']
array([11, 12])
:param fileName: file to save data to; if it ends with ``.bz2`` / ``.gz``, the file will be compressed using bzip2 / gzip.
:param vars: Sequence (tuple/list/set) of variable names to be saved. If ``None`` (default), all variables in :obj:`woo.core.Plot` are saved.
"""
import bz2,gzip
data=P.data
if not vars:
vars=data.keys(); vars.sort()
fileName=P.scene.expandTags(fileName)
if fileName.endswith('.bz2'): f=bz2.BZ2File(fileName,'wb')
elif fileName.endswith('.gz'): f=gzip.GzipFile(fileName,'wb')
else: f=open(fileName,'wb')
f.write(_bytes("# "+"\t".join(vars)+"\n"))
for i in range(len(data[vars[0]])):
f.write(_bytes("\t".join([str(data[var][i]) for var in vars])+"\n"))
f.close()
def savePylab(baseName,timestamp=False,title=None):
'''This function is not finished, do not use it.'''
import time
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
saveDataTxt(fileName=baseName+'.data.bz2')
if len(plots)==0: raise RuntimeError("No plots to save, only data saved.")
py=file(baseName+'.py','w')
py.write('#!/usr/bin/env python\n# encoding: utf-8\n# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\nimport pylab, numpy\n')
py.write("data=numpy.genfromtxt('%s.data.bz2',dtype=None,names=True)\n"%baseName)
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue # image plots, which is not exported
if len(plots)==1: py.write('pylab.figure()\n')
else: py.write('pylab.subplot(%d,%d,%d)\n'%(subRows,subCols,nPlots))
def _mkTimestamp():
import time
return time.strftime('_%Y%m%d_%H:%M')
def Scene_plot_saveGnuplot(P,baseName,term='wxt',extension=None,timestamp=False,comment=None,title=None,varData=False,timeStamp=True):
"""Save data added with :obj:`woo.plot.addData` into (compressed) file and create .gnuplot file that attempts to mimick plots specified with :obj:`woo.plot.plots`.
:param baseName: used for creating baseName.gnuplot (command file for gnuplot), associated ``baseName.data.bz2`` (data) and output files (if applicable) in the form ``baseName.[plot number].extension``
:param term: specify the gnuplot terminal; defaults to ``x11``, in which case gnuplot will draw persistent windows to screen and terminate; other useful terminals are ``png``, ``cairopdf`` and so on
:param extension: extension for ``baseName`` defaults to terminal name; fine for png for example; if you use ``cairopdf``, you should also say ``extension='pdf'`` however
:param bool timestamp: append numeric time to the basename
:param bool varData: whether file to plot will be declared as variable or be in-place in the plot expression
:param comment: a user comment (may be multiline) that will be embedded in the control file
:return: name of the gnuplot file created.
"""
data,imgData,plots,labels,xylabels=P.data,P.imgData,P.plots,P.labels,P.xylabels
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
vars=data.keys(); vars.sort()
P.saveDataTxt(fileName=baseName+'.data.bz2',vars=vars)
fPlot=file(baseName+".gnuplot",'w')
fPlot.write('#!/usr/bin/env gnuplot\n#\n')
if timeStamp: fPlot.write('# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\n')
if comment: fPlot.write('# '+comment.replace('\n','\n# ')+'#\n')
dataFile='"< bzcat %s.data.bz2"'%(baseNameNoPath)
if varData:
fPlot.write('dataFile=%s'%dataFile); dataFile='dataFile'
if not extension: extension=term
i=0
for p in plots:
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue ## this plot is image plot, which is not applicable to gnuplot
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
if term in ['wxt','x11']: fPlot.write("set term %s %d persist\n"%(term,i))
else: fPlot.write("set term %s; set output '%s.%d.%s'\n"%(term,baseNameNoPath,i,extension))
fPlot.write("set xlabel '%s'\n"%xlateLabel(p,labels))
fPlot.write("set grid\n")
fPlot.write("set datafile missing 'nan'\n")
if title: fPlot.write("set title '%s'\n"%title)
y1=True; plots_y1,plots_y2=[],[]
# replace callable/dict-like data specifiers by the results, it that particular data exists
plots_p2=[]
for pp in plots_p:
if pp[0]==None: plots_p2.append((pp[0],pp[1]))
elif pp[0].startswith('**'):
try:
dd=eval(pp[0][2:],{'S':P.scene})
plots_p2+=[(ppp,'') for ppp in dd.keys() if ppp in data.keys()]
except:
import traceback
traceback.print_exc()
print 'WARN: ignoring exception raised while evaluating expression "'+pp[0][2:]+'".'
elif pp[0].startswith('*'):
plots_p2+=[(e,'') for e in eval(pp[0][1:],{'S':P.scene}) if e in data.keys()]
else: plots_p2.append((pp[0],pp[1]))
plots_p=plots_p2
#plots_p=sum([([(pp,'') for pp in p[0]() if pp in data.keys()] if callable(p[0]) else [(p[0],p[1])] ) for p in plots_p],[])
for d in plots_p:
if d[0]==None:
y1=False; continue
if y1: plots_y1.append(d)
else: plots_y2.append(d)
fPlot.write("set ylabel '%s'\n"%(','.join([xlateLabel(_p[0],labels) for _p in plots_y1])))
if len(plots_y2)>0:
fPlot.write("set y2label '%s'\n"%(','.join([xlateLabel(_p[0],labels) for _p in plots_y2])))
fPlot.write("set y2tics\n")
ppp=[]
def _mkLine(varX,varY,i):
return " %s using %d:%d title '%s%s(%s)%s' with lines%s"%(dataFile,vars.index(varX)+1,vars.index(varY)+1,'← ' if i==0 else'',xlateLabel(varY,labels),xlateLabel(varX,labels),' →' if i==1 else '',' axes x1y2' if i==1 else '')
for pp in plots_y1: ppp.append(_mkLine(pStrip,pp[0],0))
for pp in plots_y2: ppp.append(_mkLine(pStrip,pp[0],1))
fPlot.write("plot "+",".join(ppp)+"\n")
i+=1
fPlot.close()
return baseName+'.gnuplot'
def _deprecPlotFunc(old,func,new=None,takesScene=False,*args,**kw):
"Wrapper for deprecated functions, example below."
import warnings
if not new: new=old
warnings.warn('Function plot.%s is deprecated, use %s.%s instead.'%(old,('Scene' if takesScene else 'Scene.plot'),new),stacklevel=3,category=DeprecationWarning)
S=woo.master.scene
if takesScene: return func(S,*args,**kw)
else: return func(S.plot,*args,**kw)
#
# DEPRECATED functions, will be removed at some point!
#
def reset(): _deprecPlotFunc('reset',Scene_plot_reset)
def resetData(): _deprecPlotFunc('resetData',Scene_plot_resetData)
def splitData(): _deprecPlotFunc('splitData',Scene_plot_splitData)
def reverseData(): _deprecPlotFunc('reverseData',Scene_plot_reverseData)
def addAutoData(): _deprecPlotFunc('addAutoData',Scene_plot_autoData,new='autoData')
def addData(): _deprecPlotFunc('addData',Scene_plot_addData)
def addImgData(): _deprecPlotFunc('addImgData',Scene_plot_addImgData)
def saveGnuplot(): _deprecPlotFunc('saveGnuplot',Scene_plot_saveGnuplot)
def saveDataTxt(): _deprecPlotFunc('saveDataTxt',Scene_plot_saveDataTxt)
def plot(): _deprecPlotFunc('plot',Scene_plot_plot)
# called at startup from from woo._monkey.plot
def defMonkeyMethods():
import woo.core
woo.core.Plot.reset=Scene_plot_reset
woo.core.Plot.resetData=Scene_plot_resetData
woo.core.Plot.splitData=Scene_plot_splitData
woo.core.Plot.reverseData=Scene_plot_reverseData
woo.core.Plot.autoData=Scene_plot_autoData
woo.core.Plot.addData=Scene_plot_addData
woo.core.Plot.addImgData=Scene_plot_addImgData
woo.core.Plot.saveGnuplot=Scene_plot_saveGnuplot
woo.core.Plot.saveDataTxt=Scene_plot_saveDataTxt
woo.core.Plot.plot=Scene_plot_plot
defMonkeyMethods()
| gpl-2.0 | -1,342,440,483,409,564,000 | 45.341102 | 397 | 0.698692 | false | 2.904781 | false | false | false | 0.046793 |
jeremiak/regulations-site | regulations/views/diff.py | 1 | 8631 | #vim: set encoding=utf-8
from regulations.generator import generator
from regulations.generator.html_builder import HTMLBuilder
from regulations.generator.layers.toc_applier import TableOfContentsLayer
from regulations.generator.node_types import EMPTYPART, REGTEXT
from regulations.generator.section_url import SectionUrl
from regulations.generator.toc import fetch_toc
from regulations.views import error_handling, utils
from regulations.views.chrome import ChromeView
from regulations.views.navigation import choose_next_section
from regulations.views.navigation import choose_previous_section
from regulations.views.partial import PartialView
from django.core.urlresolvers import reverse
def get_appliers(label_id, older, newer):
diff = generator.get_diff_applier(label_id, older, newer)
if diff is None:
raise error_handling.MissingContentException()
appliers = utils.handle_diff_layers(
'graphics,paragraph,keyterms,defined',
label_id,
older,
newer)
appliers += (diff,)
return appliers
class PartialSectionDiffView(PartialView):
""" A diff view of a partial section. """
template_name = 'regulations/regulation-content.html'
def get(self, request, *args, **kwargs):
""" Override GET so that we can catch and propagate any errors. """
try:
return super(PartialSectionDiffView, self).get(request, *args,
**kwargs)
except error_handling.MissingContentException, e:
return error_handling.handle_generic_404(request)
def footer_nav(self, label, toc, old_version, new_version, from_version):
nav = {}
for idx, toc_entry in enumerate(toc):
if toc_entry['section_id'] != label:
continue
p_sect = choose_previous_section(idx, toc)
n_sect = choose_next_section(idx, toc)
if p_sect:
nav['previous'] = p_sect
nav['previous']['url'] = reverse_chrome_diff_view(
p_sect['section_id'], old_version,
new_version, from_version)
if n_sect:
nav['next'] = n_sect
nav['next']['url'] = reverse_chrome_diff_view(
n_sect['section_id'], old_version,
new_version, from_version)
return nav
def get_context_data(self, **kwargs):
# We don't want to run the content data of PartialView -- it assumes
# we will be applying layers
context = super(PartialView, self).get_context_data(**kwargs)
label_id = context['label_id']
older = context['version']
newer = context['newer_version']
tree = generator.get_tree_paragraph(label_id, older)
if tree is None:
#TODO We need a more complicated check here to see if the diffs
#add the requested section. If not -> 404
tree = {}
appliers = get_appliers(label_id, older, newer)
builder = HTMLBuilder(*appliers)
builder.tree = tree
builder.generate_html()
child_of_root = builder.tree
if builder.tree['node_type'] == REGTEXT:
child_of_root = {
'node_type': EMPTYPART,
'children': [builder.tree]}
context['tree'] = {'children': [child_of_root]}
context['markup_page_type'] = 'diff'
regpart = label_id.split('-')[0]
old_toc = fetch_toc(regpart, older)
diff = generator.get_diff_json(regpart, older, newer)
from_version = self.request.GET.get('from_version', older)
context['TOC'] = diff_toc(older, newer, old_toc, diff, from_version)
context['navigation'] = self.footer_nav(label_id, context['TOC'],
older, newer, from_version)
return context
class ChromeSectionDiffView(ChromeView):
"""Search results with chrome"""
template_name = 'regulations/diff-chrome.html'
partial_class = PartialSectionDiffView
has_sidebar = False
def check_tree(self, context):
pass # The tree may or may not exist in the particular version
def add_diff_content(self, context):
context['from_version'] = self.request.GET.get(
'from_version', context['version'])
context['left_version'] = context['version']
context['right_version'] = \
context['main_content_context']['newer_version']
from_version = self.request.GET.get('from_version', context['version'])
context['TOC'] = context['main_content_context']['TOC']
# Add reference to the first subterp, so we know how to redirect
toc = fetch_toc(context['label_id'].split('-')[0], from_version)
for entry in toc:
if entry.get('is_supplement') and entry.get('sub_toc'):
el = entry['sub_toc'][0]
el['url'] = SectionUrl().of(
el['index'], from_version,
self.partial_class.sectional_links)
context['first_subterp'] = el
return context
def add_main_content(self, context):
super(ChromeSectionDiffView, self).add_main_content(context)
return self.add_diff_content(context)
def reverse_chrome_diff_view(sect_id, left_ver, right_ver, from_version):
""" Reverse the URL for a chromed diff view. """
diff_url = reverse(
'chrome_section_diff_view',
args=(sect_id, left_ver, right_ver))
diff_url += '?from_version=%s' % from_version
return diff_url
def extract_sections(toc):
compiled_toc = []
for i in toc:
if 'Subpart' in i['index']:
compiled_toc.extend(i['sub_toc'])
else:
compiled_toc.append(i)
return compiled_toc
def diff_toc(older_version, newer_version, old_toc, diff, from_version):
#We work around Subparts in the TOC for now.
compiled_toc = extract_sections(old_toc)
for node in (v['node'] for v in diff.values() if v['op'] == 'added'):
if len(node['label']) == 2 and node['title']:
element = {
'label': node['title'],
'index': node['label'],
'section_id': '-'.join(node['label']),
'op': 'added'
}
data = {'index': node['label'], 'title': node['title']}
TableOfContentsLayer.section(element, data)
TableOfContentsLayer.appendix_supplement(element, data)
compiled_toc.append(element)
modified, deleted = modified_deleted_sections(diff)
for el in compiled_toc:
if not 'Subpart' in el['index'] and not 'Subjgrp' in el['index']:
el['url'] = reverse_chrome_diff_view(
el['section_id'], older_version, newer_version, from_version)
# Deleted first, lest deletions in paragraphs affect the section
if tuple(el['index']) in deleted and 'op' not in el:
el['op'] = 'deleted'
if tuple(el['index']) in modified and 'op' not in el:
el['op'] = 'modified'
return sort_toc(compiled_toc)
def sort_toc(toc):
""" Sort the Table of Contents elements. """
def normalize(element):
""" Return a sorting order for a TOC element, primarily based
on the index, and the type of content. """
# The general order of a regulation is: regulation text sections,
# appendices, and then the interpretations.
normalized = []
if element.get('is_section'):
normalized.append(0)
elif element.get('is_appendix'):
normalized.append(1)
elif element.get('is_supplement'):
normalized.append(2)
for part in element['index']:
if part.isdigit():
normalized.append(int(part))
else:
normalized.append(part)
return normalized
return sorted(toc, key=lambda el: tuple(normalize(el)))
def modified_deleted_sections(diff):
modified, deleted = set(), set()
for label, diff_value in diff.iteritems():
label = tuple(label.split('-'))
if 'Interp' in label:
section_label = (label[0], 'Interp')
else:
section_label = tuple(label[:2])
# Whole section was deleted
if diff_value['op'] == 'deleted' and label == section_label:
deleted.add(section_label)
# Whole section added/modified or paragraph added/deleted/modified
else:
modified.add(section_label)
return modified, deleted
| cc0-1.0 | -5,635,030,999,713,222,000 | 35.417722 | 79 | 0.597613 | false | 4.031294 | false | false | false | 0.000579 |
nkalodimas/invenio | modules/bibformat/lib/elements/bfe_field.py | 28 | 6253 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints a custom field
"""
__revision__ = "$Id$"
from invenio.bibformat_utils import parse_tag
def format_element(bfo, tag, limit, instances_separator=" ",
subfields_separator=" ", extension="", output_pattern=""):
"""
Prints the given field of a record.
If tag is in range [001, 010], this element assumes
that it accesses a control field. Else it considers it
accesses a data field.
<p>For eg. consider the following metdata:
<pre>
100__ $$aCalatroni, S$$uCERN
245__ $$aStatus of the EP Simulations and Facilities for the SPL
700__ $$aFerreira, L$$uCERN
700__ $$aMacatrao, M$$uCERN
700__ $$aSkala, A$$uCERN
700__ $$aSosin, M$$uCERN
700__ $$ade Waele, R$$uCERN
700__ $$aWithofs, Y$$uKHLim, Diepenbeek
</pre>
The following calls to bfe_field would print:
<pre>
<BFE_FIELD tag="700" instances_separator="<br/>" subfields_separator=" - ">
Ferreira, L - CERN
Macatrao, M - CERN
Skala, A - CERN
Sosin, M - CERN
de Waele, R - CERN
Withofs, Y - KHLim, Diepenbeek
</pre>
</p>
<p>For more advanced formatting, the <code>output_pattern</code>
parameter can be used to output the subfields of each instance in
the specified way. For eg. consider the following metadata:
<pre>
775__ $$b15. Aufl.$$c1995-1996$$nv.1$$pGrundlagen und Werkstoffe$$w317999
775__ $$b12. Aufl.$$c1963$$w278898
775__ $$b14. Aufl.$$c1983$$w107899
775__ $$b13. Aufl.$$c1974$$w99635
</pre>
with the following <code>output_pattern</code>:
<pre>
<a href="/record/%(w)s">%(b)s (%(c)s) %(n)s %(p)s</a>
</pre>
would print:<br/>
<a href="/record/317999">15. Aufl. (1995-1996) v.1 Grundlagen und Werkstoffe</a><br/>
<a href="/record/278898">12. Aufl. (1963) </a><br/>
<a href="/record/107899">14. Aufl. (1983) </a><br/>
<a href="/record/99635">13. Aufl. (1974) </a>
<br/>(<code>instances_separator="<br/>"</code> set for
readability)<br/> The output pattern must follow <a
href="http://docs.python.org/library/stdtypes.html#string-formatting-operations">Python
string formatting</a> syntax. The format must use parenthesized
notation to map to the subfield code. This currently restricts the
support of <code>output_pattern</code> to non-repeatable
subfields</p>
@param tag: the tag code of the field that is to be printed
@param instances_separator: a separator between instances of field
@param subfields_separator: a separator between subfields of an instance
@param limit: the maximum number of values to display.
@param extension: a text printed at the end if 'limit' has been exceeded
@param output_pattern: when specified, prints the subfields of each instance according to pattern specified as parameter (following Python string formatting convention)
"""
# Check if data or control field
p_tag = parse_tag(tag)
if p_tag[0].isdigit() and int(p_tag[0]) in range(0, 11):
return bfo.control_field(tag)
elif p_tag[0].isdigit():
# Get values without subcode.
# We will filter unneeded subcode later
if p_tag[1] == '':
p_tag[1] = '_'
if p_tag[2] == '':
p_tag[2] = '_'
values = bfo.fields(p_tag[0]+p_tag[1]+p_tag[2]) # Values will
# always be a
# list of
# dicts
else:
return ''
x = 0
instances_out = [] # Retain each instance output
for instance in values:
filtered_values = [value for (subcode, value) in instance.iteritems()
if p_tag[3] == '' or p_tag[3] == '%' \
or p_tag[3] == subcode]
if len(filtered_values) > 0:
# We have found some corresponding subcode(s)
if limit.isdigit() and x + len(filtered_values) >= int(limit):
# We are going to exceed the limit
filtered_values = filtered_values[:int(limit)-x] # Takes only needed one
if len(filtered_values) > 0: # do not append empty list!
if output_pattern:
try:
instances_out.append(output_pattern % DictNoKeyError(instance))
except:
pass
else:
instances_out.append(subfields_separator.join(filtered_values))
x += len(filtered_values) # record that so we know limit has been exceeded
break # No need to go further
else:
if output_pattern:
try:
instances_out.append(output_pattern % DictNoKeyError(instance))
except:
pass
else:
instances_out.append(subfields_separator.join(filtered_values))
x += len(filtered_values)
ext_out = ''
if limit.isdigit() and x > int(limit):
ext_out = extension
return instances_separator.join(instances_out) + ext_out
class DictNoKeyError(dict):
def __getitem__(self, key):
if dict.__contains__(self, key):
val = dict.__getitem__(self, key)
else:
val = ''
return val
| gpl-2.0 | -1,610,546,076,519,092,200 | 39.341935 | 172 | 0.592036 | false | 3.669601 | false | false | false | 0.006077 |
verekia/hackarena | hackarena/server.py | 1 | 4323 | from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, Application, url, StaticFileHandler
from tornado.options import define, options
from sockjs.tornado import SockJSRouter, SockJSConnection
import json
class WebSocketHandler(SockJSConnection):
clients = {
'lobby': {},
}
def on_open(self, info):
# Required because the last part of the 3-part session string varies on on_close
# str(self.session): 1416760865.178006 bmv6q4zu 1416760865
# self.sessionString: 1416760865.178006 bmv6q4zu
# self.temporaryName: bmv6q4zu
self.sessionString = getSessionString(str(self.session))
self.temporaryName = generateRandomName(str(self.session))
self.clients['lobby'][self.sessionString] = self
def on_close(self):
del self.clients[self.room][getSessionString(str(self.session))]
self.refresh_users()
def refresh_users(self):
room_users = [self.getName(value) for key, value in self.clients[self.room].items()]
self.broadcast([value for key, value in self.clients[self.room].items()], createMessage('USERS', room_users))
def getName(self, obj=None):
if obj:
return obj.chosenName if hasattr(obj, 'chosenName') else obj.temporaryName
else:
return self.chosenName if hasattr(self, 'chosenName') else self.temporaryName
def broadcast_to_all(self, message):
self.broadcast([value for key, value in self.clients[self.room].items()], message)
def on_message(self, message):
try:
data = json.loads(message)
except:
self.send(createMessage('SIMPLE_MESSAGE', 'Unsupported message type.'))
print 'Received unsupported message type'
return
##############################################
# #
# Backend Events Handling, core logic #
# #
##############################################
if data['type'] == 'ROOM':
self.room = data['content']
if self.clients['lobby'][self.sessionString]:
del self.clients['lobby'][self.sessionString]
try:
self.clients[self.room][self.sessionString] = self
except:
self.clients[self.room] = {}
self.clients[self.room][self.sessionString] = self
self.send(createMessage('ENTERED_ROOM', {'roomName':self.room, 'temporaryName': self.temporaryName}))
self.broadcast_to_all(createMessage('OTHER_ENTERED_ROOM', self.getName()))
self.refresh_users()
elif data['type'] == 'NAME':
old_name = self.getName()
self.chosenName = data['content']
self.broadcast_to_all(createMessage('USER_RENAME', {'previousName': old_name, 'newName': self.chosenName}))
self.refresh_users()
elif data['type'] == 'USER_MESSAGE':
self.broadcast_to_all(createMessage('USER_MESSAGE', {'username': self.getName(), 'message': data['content']}))
else:
self.send(createMessage('MESSAGE', 'Unsupported message type.'))
print 'Received unsupported message type'
##############################################
# #
# App Setup #
# #
##############################################
define('port', default=8888, help="run on the given port", type=int)
define('address', default='192.168.X.X', help="run on the address", type=str)
class IndexHandler(RequestHandler):
def get(self, room):
self.render("index.html")
def make_app():
sock_router = SockJSRouter(WebSocketHandler, '/websocket')
return Application(
sock_router.urls +
[
(r'/assets/(.*)', StaticFileHandler, {'path': 'assets'}),
(r'/static/(.*)', StaticFileHandler, {'path': 'static'}),
url(r'/(.*)', IndexHandler),
]
)
def main():
app = make_app()
app.listen(options.port) #, options.address)
IOLoop.current().start()
if __name__ == '__main__':
options.parse_config_file('server.conf')
main()
| mit | 4,482,904,039,003,858,400 | 34.434426 | 122 | 0.557715 | false | 4.21345 | false | false | false | 0.005552 |
bbc/kamaelia | Sketches/MPS/BugReports/FixTests/Kamaelia/Examples/SimpleGraphicalApps/Ticker/Ticker.py | 6 | 1294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Kamaelia.UI.Pygame.Ticker import Ticker
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.ReadFileAdaptor import ReadFileAdaptor
Pipeline( ReadFileAdaptor("Ulysses",readmode="line",steptime=0.5),
Ticker(background_colour=(128,48,128),
render_left = 1,
render_top = 1,
render_right = 600,
render_bottom = 200,
position = (100, 300),
)
).run()
| apache-2.0 | -8,735,597,551,677,132,000 | 37.058824 | 78 | 0.690881 | false | 3.707736 | false | false | false | 0.013138 |
TheKK/Shedskin | tests/99.py | 6 | 6253 |
# (c) Mark Dufour, Haifang Ni
# --- mark.dufour@gmail.com
empty, black, white = 0, 1, -1 # [int], [int], [int]
board = [[empty for x in range(8)] for y in range(8)] # [list(list(int))]
board[3][3] = board[4][4] = white # [int]
board[3][4] = board[4][3] = black # [int]
player, depth = {white: 'human', black: 'lalaoth'}, 3 # [dict(int, str)], [int]
def possible_move(board, x, y, color): # board: [list(list(int))], x: [int], y: [int], color: [int]
if board[x][y] != empty: # [int]
return False # [int]
for direction in [(1, 1), (-1, 1), (0, 1), (1, -1), (-1, -1), (0, -1), (1, 0), (-1, 0)]: # [list(tuple2(int, int))]
if flip_in_direction(board, x, y, direction, color): # [int]
return True # [int]
return False # [int]
def flip_in_direction(board, x, y, direction, color): # board: [list(list(int))], x: [int], y: [int], direction: [tuple2(int, int)], color: [int]
other_color = False # [int]
while True: # [int]
x, y = x+direction[0], y+direction[1] # [int], [int]
if x not in range(8) or y not in range(8): # [int]
return False # [int]
square = board[x][y] # [int]
if square == empty: return False # [int]
if square != color: other_color = True # [int]
else: return other_color # [int]
def flip_stones(board, move, color): # board: [list(list(int))], move: [tuple2(int, int)], color: [int]*
global flips
flips += 1 # [int]
for direction in [(1, 1), (-1, 1), (0, 1), (1, -1), (-1, -1), (0, -1), (1, 0), (-1, 0)]: # [list(tuple2(int, int))]
if flip_in_direction(board, move[0], move[1], direction, color): # [int]
x, y = move[0]+direction[0], move[1]+direction[1] # [int], [int]
while board[x][y] != color: # [int]
board[x][y] = color # [int]
x, y = x+direction[0], y+direction[1] # [int], [int]
board[move[0]][move[1]] = color # [int]
#def print_board(board, turn): # board: [], turn: []
# for line in board: # []
# print ' '.join([{white: 'O', black: 'X', empty: '.'}[square] for square in line]) # []
# print 'turn:', player[turn] # [], []
# print 'black:', stone_count(board, black), 'white:', stone_count(board, white) # [], [], [], []
def possible_moves(board, color): # board: [list(list(int))], color: [int]
return [(x,y) for x in range(8) for y in range(8) if possible_move(board, x, y, color)] # [list(tuple2(int, int))]
#def coordinates(move): # move: []
# return (int(move[1])-1, 'abcdefgh'.index(move[0])) # []
def stone_count(board, color): # board: [list(list(int))], color: [int]
return sum([len([square for square in line if square == color]) for line in board]) # [list(int)]
#def human_move(move): # move: []
# return 'abcdefgh'[move[0]]+str(move[1]+1) # []
def best_move(board, color, first, step=1): # board: [list(list(int))], color: [int]*, first: [int], step: [int]
max_move, max_mobility, max_score = None, 0, 0 # [none], [int], [int]
#print 'possible', possible_moves(board, color) # [str], [list(tuple2(int, int))]
for move in possible_moves(board, color): # [list(tuple2(int, int))]
#print 'board before' # [str]
#print_board(board, color) # []
#print 'move', move # [str], [tuple2(int, int)]
if move in [(0,0),(0,7),(7,0),(7,7)]: # [list(tuple2(int, int))]
mobility, score = 64, 64 # [int], [int]
if color != first: # [int]
mobility = 64-mobility # [int]
else:
testboard = [[square for square in line] for line in board] # [list(list(int))]
flip_stones(testboard, move, color) # []
#print_board(testboard, color) # []
if step < depth: # [int]
#print 'deeper' # [str]
next_move, mobility = best_move(testboard, -color, first, step+1) # [tuple2(tuple2(int, int), int)]
else:
#print 'mobility' # [str]
mobility = len(possible_moves(testboard, first)) # [int]
score = mobility # [int]
if color != first: # [int]
score = 64-score # [int]
if score >= max_score: # []
max_move, max_mobility, max_score = move, mobility, score # [tuple2(int, int)], [int], [int]
#print 'done' # [str]
return max_move, max_mobility # [tuple2(tuple2(int, int), int)]
flips = 0 # [int]
steps = 0 # [int]
turn = black # [int]
while possible_moves(board, black) or possible_moves(board, white): # [list(tuple2(int, int))]
if possible_moves(board, turn): # [list(tuple2(int, int))]
#print_board(board, turn) # []
#print 'flips', flips # [str], [int]
# steps += 1 # [int]
# if steps > 5: # [int]
# break
#if turn == black: # [int]
move, mobility = best_move(board, turn, turn) # [tuple2(tuple2(int, int), int)]
#else:
# move = coordinates(raw_input()) # [tuple2(int, int)]
if not possible_move(board, move[0], move[1], turn): # [int]
print 'impossible!' # [str]
turn = -turn # [int]
else:
flip_stones(board, move, turn) # []
turn = -turn # [int]
#print_board(board, turn)
print 'flips', flips # [str], [int]
if stone_count(board, black) == stone_count(board, white): # [int]
print 'draw!' # [str]
else:
if stone_count(board, black) > stone_count(board, white): print player[black], 'wins!' # [str], [str]
else: print player[white], 'wins!' # [str], [str]
| gpl-3.0 | 4,341,224,447,258,199,600 | 50.677686 | 145 | 0.471774 | false | 3.211608 | false | false | false | 0.015193 |
GRIFFINCollaboration/beamCompanionExplorer | data/parseMass.py | 1 | 1034 | import json
def generateMassTable():
    '''
    Generate a mass table for the beam companion explorer.

    Reads the fixed-width AME2012 evaluation file ``mass.mas12`` from the
    current directory, writes the table as JSON to ``mass.dict``, and
    returns it: a list indexed by Z of dicts keyed by str(A) -> mass,
    where mass is the column at chars 96:110 divided by 1e6.
    '''
    headerLength = 39  # number of descriptive header lines to skip

    massTable = []
    with open("mass.mas12", "r") as fid:
        # fast forward through header
        for _ in range(headerLength):
            fid.readline()
        # unpack each fixed-width record
        for record in fid:
            N = int(record[6:9])
            Z = int(record[11:14])
            A = N + Z
            # '#' in the file marks estimated (non-experimental) values;
            # treat it as a decimal point, as the original format intends.
            mass = record[96:110].replace(' ', '').replace('#', '.')
            mass = float(mass) / 1000000.
            # pack N, Z, mass into dictionary for beam companion explorer;
            # grow the outer list until index Z exists.
            while len(massTable) - 1 < Z:
                massTable.append({})
            massTable[Z][str(A)] = mass

    # Persist the table as JSON next to the input file.
    with open('mass.dict', 'w') as outputTable:
        outputTable.write(json.dumps(massTable))

    return massTable
GdZ/scriptfile | software/googleAppEngine/google/appengine/tools/download_appstats.py | 14 | 6073 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Script for downloading Appstats data using remote_api.
Usage:
%prog [-s HOSTNAME] [-p PATH] [-o OUTPUTFILE] [-j] [-q] [-m] [APPID]
If the -s HOSTNAME flag is not specified, the APPID must be specified.
"""
from google.appengine.tools import os_compat
import getpass
import logging
import optparse
import os
import sys
from google.appengine.ext.appstats import loader
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.tools import appengine_rpc
DEFAULT_PATH_PYTHON = '/_ah/remote_api'
DEFAULT_PATH_JAVA = '/remote_api'
DEFAULT_FILE = 'appstats.pkl'
def auth_func():
  """Prompt interactively for App Engine credentials (email, password)."""
  email = raw_input('Email: ')
  password = getpass.getpass('Password: ')
  return (email, password)
def download_appstats(servername, appid, path, secure,
                      rpc_server_factory, filename, appdir,
                      merge, java_application):
  """Invoke remote_api to download appstats data.

  Configures the remote_api stub against the given server/path, forces
  authentication, then delegates the actual memcache download to
  download_data().  `appdir` is put on sys.path so the application's
  appengine_config module (if any) is honored.
  """
  if os.path.isdir(appdir):
    sys.path.insert(0, appdir)
    try:
      logging.info('Importing appengine_config from %s', appdir)
      # Imported for its side effects only (registers config hooks).
      import appengine_config
    except ImportError, err:
      logging.warn('Failed to load appengine_config: %s', err)

  remote_api_stub.ConfigureRemoteApi(appid, path, auth_func,
                                     servername=servername,
                                     save_cookies=True, secure=secure,
                                     rpc_server_factory=rpc_server_factory)
  # Trigger authentication now so failures surface before downloading.
  remote_api_stub.MaybeInvokeAuthentication()

  os.environ['SERVER_SOFTWARE'] = 'Development (remote_api_shell)/1.0'
  if not appid:
    # remote_api fills in APPLICATION_ID once configured.
    appid = os.environ['APPLICATION_ID']
  download_data(filename, merge, java_application)
def download_data(filename, merge, java_application):
  """Download appstats data from memcache and pickle it to `filename`.

  Args:
    filename: Path of the pickle file to write (and read when merging).
    merge: If True, merge newly fetched records with records already in
      `filename` instead of overwriting them.
    java_application: If True, read records produced by a Java app.
  """
  oldrecords = []
  if merge:
    try:
      oldfile = open(filename, 'rb')
    except IOError:
      logging.info('No file to merge. Creating new file %s',
                   filename)
    else:
      logging.info('Merging with existing file %s', filename)
      try:
        # Ensure the handle is closed even if unpickling fails.
        oldrecords = loader.UnpickleFromFile(oldfile)
      finally:
        oldfile.close()
  if oldrecords:
    # Records are newest-first; only fetch records newer than the most
    # recent one already on disk.
    last_timestamp = oldrecords[0].start_timestamp_milliseconds()
    records = loader.FromMemcache(filter_timestamp=last_timestamp,
                                  java_application=java_application)
  else:
    records = loader.FromMemcache(java_application=java_application)

  merged_records = records + oldrecords
  try:
    outfile = open(filename, 'wb')
  except IOError:
    logging.error('Cannot open %s', filename)
    return
  try:
    loader.PickleToFile(merged_records, outfile)
  finally:
    outfile.close()
def main(argv):
  """Parse arguments and run the Appstats download.

  Accepts an optional APPID positional argument (plus an optional PATH)
  and the flags documented in the module docstring, then calls
  download_appstats() with the resolved server name, app id and path.
  """
  parser = optparse.OptionParser(usage=__doc__)
  parser.add_option('-s', '--server', dest='server',
                    help='The hostname your app is deployed on. '
                         'Defaults to <app_id>.appspot.com.')
  parser.add_option('-o', '--output', dest='filename', default=DEFAULT_FILE,
                    help='The file to which Appstats data must '
                         'be downloaded. A .pkl extension is '
                         'recommended. Defaults to %s.' % DEFAULT_FILE)
  parser.add_option('-p', '--path', dest='path',
                    help='The path on the server to the remote_api handler. '
                         'Defaults to %s for python and %s for java. '
                         % (DEFAULT_PATH_PYTHON, DEFAULT_PATH_JAVA))
  parser.add_option('-q', '--quiet',
                    action='store_false', dest='verbose', default=True,
                    help='do not print download status messages to stdout')
  parser.add_option('-j', '--java',
                    action='store_true', dest='java_application', default=False,
                    help='set this for downloading from a java application')
  parser.add_option('-m', '--merge',
                    action='store_true', dest='merge', default=False,
                    help='if file exists, merge rather than overwrite')
  parser.add_option('--secure', dest='secure', action='store_true',
                    default=False, help='Use HTTPS when communicating '
                                        'with the server.')
  parser.add_option('--appdir', dest='appdir', action='store', default='.',
                    help='application directory, for finding '
                         'appengine_config.py. Defaults to ".".')
  (options, args) = parser.parse_args()
  # Usage errors: no server and no APPID, too many positionals, or the
  # path given both as a flag and as a positional.
  if ((not options.server and not args) or len(args) > 2
      or (options.path and len(args) > 1)):
    parser.print_usage(sys.stderr)
    if len(args) > 2:
      print >> sys.stderr, 'Unexpected arguments: %s' % args[2:]
    elif options.path and len(args) > 1:
      print >> sys.stderr, 'Path specified twice.'
    sys.exit(1)
  servername = options.server
  appid = None
  if options.java_application:
    default_path = DEFAULT_PATH_JAVA
  else:
    default_path = DEFAULT_PATH_PYTHON
  path = options.path or default_path
  if args:
    if servername:
      # Explicit --server: the positional argument is the app id.
      appid = args[0]
    else:
      # No --server: derive the hostname from the app id.
      servername = '%s.appspot.com' % args[0]
    if len(args) == 2:
      path = args[1]
  if options.verbose:
    logging.getLogger().setLevel(logging.INFO)
  download_appstats(servername, appid, path, options.secure,
                    appengine_rpc.HttpRpcServer, options.filename,
                    options.appdir, options.merge, options.java_application)


if __name__ == '__main__':
  main(sys.argv)
| mit | 23,438,916,149,714,190 | 30.963158 | 80 | 0.627367 | false | 3.969281 | false | false | false | 0.010374 |
gangadhar-kadam/verve_test_frappe | frappe/modules/__init__.py | 4 | 3155 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Utilities for using modules
"""
import frappe, os
import frappe.utils
from frappe import _
lower_case_files_for = ['DocType', 'Page', 'Report',
"Workflow", 'Module Def', 'Desktop Item', 'Workflow State', 'Workflow Action', 'Print Format',
"Website Theme"]
def scrub(txt):
	"""Return the code-friendly (lowercased, underscored) form of `txt`."""
	return frappe.scrub(txt)
def scrub_dt_dn(dt, dn):
	"""Return code-friendly (scrubbed) doctype and name for doctypes that
	live in scrubbed folders; other doctypes are passed through unchanged."""
	if dt in lower_case_files_for:
		return scrub(dt), scrub(dn)
	return dt, dn
def get_module_path(module):
	"""Returns path of the given module (delegates to frappe)."""
	return frappe.get_module_path(module)
def get_doc_path(module, doctype, name):
	"""Return the on-disk folder for a document: <module path>/<dt>/<dn>."""
	dt, dn = scrub_dt_dn(doctype, name)
	return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, force=True):
	"""Re-import a document's files from disk into the database."""
	from frappe.modules.import_file import import_files
	return import_files(module, dt, dn, force=force)
def export_doc(doctype, name, module=None):
	"""Write a doc to standard path.

	If `module` is not given it is looked up from the DocType record.
	"""
	from frappe.modules.export_file import write_document_file
	# (removed leftover debug `print doctype, name` that polluted stdout)
	if not module: module = frappe.db.get_value('DocType', name, 'module')
	write_document_file(frappe.get_doc(doctype, name), module)
def get_doctype_module(doctype):
	"""Returns **Module Def** name of given doctype ("core" if unset)."""
	return frappe.db.get_value('DocType', doctype, 'module') or "core"
# Process-wide cache of imported doctype controller modules,
# keyed by (app, doctype, prefix).
doctype_python_modules = {}
def load_doctype_module(doctype, module=None, prefix=""):
	"""Returns the (cached) python module object for the given doctype."""
	if not module:
		module = get_doctype_module(doctype)
	app = get_module_app(module)
	key = (app, doctype, prefix)
	# Import once and memoize; subsequent calls hit the cache.
	if key not in doctype_python_modules:
		doctype_python_modules[key] = frappe.get_module(get_module_name(doctype, module, prefix))
	return doctype_python_modules[key]
def get_module_name(doctype, module, prefix="", app=None):
	"""Return the dotted python path of a doctype's controller module:
	<app>.<module>.doctype.<doctype>.<prefix><doctype> (all scrubbed)."""
	app_name = scrub(app or get_module_app(module))
	module_name = scrub(module)
	doctype_name = scrub(doctype)
	return "%s.%s.doctype.%s.%s%s" % (app_name, module_name,
		doctype_name, prefix, doctype_name)
def get_module_app(module):
	"""Return the app that provides the given module (from frappe.local)."""
	return frappe.local.module_app[scrub(module)]
def get_app_publisher(module):
	"""Return the `app_publisher` hook value of the app owning `module`."""
	app = frappe.local.module_app[scrub(module)]
	if not app:
		frappe.throw(_("App not found"))
	# Hooks return a list; the first entry is the publisher string.
	app_publisher = frappe.get_hooks(hook="app_publisher", app_name=app)[0]
	return app_publisher
def make_boilerplate(template, doc, opts=None):
	"""Create a controller file for `doc` from a boilerplate template.

	Renders the core boilerplate `template` (with "controller" in its
	name replaced by the scrubbed doc name) into the document's folder.
	Does nothing if the target file already exists.
	"""
	target_path = get_doc_path(doc.module, doc.doctype, doc.name)
	template_name = template.replace("controller", scrub(doc.name))
	target_file_path = os.path.join(target_path, template_name)
	app_publisher = get_app_publisher(doc.module)
	if not os.path.exists(target_file_path):
		if not opts:
			opts = {}
		# str.format fills app_publisher/classname/doctype plus any opts
		# placeholders used by the boilerplate file.
		with open(target_file_path, 'w') as target:
			with open(os.path.join(get_module_path("core"), "doctype", scrub(doc.doctype),
				"boilerplate", template), 'r') as source:
				target.write(source.read().format(app_publisher=app_publisher,
					classname=doc.name.replace(" ", ""), doctype=doc.name, **opts))
| mit | -6,747,962,369,979,691,000 | 31.193878 | 95 | 0.713471 | false | 3.081055 | false | false | false | 0.026307 |
nlfiedler/mal | rpython/step7_quote.py | 47 | 5358 | import sys, traceback
import mal_readline
import mal_types as types
from mal_types import (MalSym, MalInt, MalStr,
nil, true, false, _symbol, _keywordu,
MalList, _list, MalVector, MalHashMap, MalFunc)
import reader, printer
from env import Env
import core
# read
# read
def READ(str):
    """Parse source text into a mal AST (note: param shadows builtin str)."""
    return reader.read_str(str)
# eval
def is_pair(x):
    """True if x is a non-empty sequential value (list/vector)."""
    return types._sequential_Q(x) and len(x) > 0
def quasiquote(ast):
    """Expand a quasiquote form into quote/cons/concat calls (RPython)."""
    if not is_pair(ast):
        # Non-pairs are simply quoted.
        return _list(_symbol(u"quote"), ast)
    else:
        a0 = ast[0]
        if isinstance(a0, MalSym):
            # (unquote x) -> x evaluated as-is
            if a0.value == u'unquote':
                return ast[1]
        if is_pair(a0) and isinstance(a0[0], MalSym):
            a00 = a0[0]
            if (isinstance(a00, MalSym) and
                a00.value == u'splice-unquote'):
                # ((splice-unquote x) ...) -> (concat x (quasiquote rest))
                return _list(_symbol(u"concat"),
                             a0[1],
                             quasiquote(ast.rest()))
        # Default: (cons (quasiquote head) (quasiquote rest))
        return _list(_symbol(u"cons"),
                     quasiquote(a0),
                     quasiquote(ast.rest()))
def eval_ast(ast, env):
    """Evaluate a non-apply AST node: resolve symbols, recurse into
    collections, and return primitives unchanged."""
    if types._symbol_Q(ast):
        assert isinstance(ast, MalSym)
        return env.get(ast)
    elif types._list_Q(ast):
        res = []
        for a in ast.values:
            res.append(EVAL(a, env))
        return MalList(res)
    elif types._vector_Q(ast):
        res = []
        for a in ast.values:
            res.append(EVAL(a, env))
        return MalVector(res)
    elif types._hash_map_Q(ast):
        # Evaluate every value; keys are kept as-is.
        new_dct = {}
        for k in ast.dct.keys():
            new_dct[k] = EVAL(ast.dct[k], env)
        return MalHashMap(new_dct)
    else:
        return ast  # primitive value, return unchanged
def EVAL(ast, env):
    """Evaluate a mal AST in `env`.

    The `while True` loop implements tail-call optimization (TCO): branches
    marked "Continue loop (TCO)" rebind ast/env instead of recursing.
    """
    while True:
        #print("EVAL %s" % printer._pr_str(ast))
        if not types._list_Q(ast):
            return eval_ast(ast, env)
        # apply list
        if len(ast) == 0: return ast
        a0 = ast[0]
        if isinstance(a0, MalSym):
            a0sym = a0.value
        else:
            # Non-symbol head falls through to the generic apply branch.
            a0sym = u"__<*fn*>__"
        if u"def!" == a0sym:
            a1, a2 = ast[1], ast[2]
            res = EVAL(a2, env)
            return env.set(a1, res)
        elif u"let*" == a0sym:
            a1, a2 = ast[1], ast[2]
            let_env = Env(env)
            # Bindings come in name/value pairs.
            for i in range(0, len(a1), 2):
                let_env.set(a1[i], EVAL(a1[i+1], let_env))
            ast = a2
            env = let_env # Continue loop (TCO)
        elif u"quote" == a0sym:
            return ast[1]
        elif u"quasiquote" == a0sym:
            ast = quasiquote(ast[1]) # Continue loop (TCO)
        elif u"do" == a0sym:
            if len(ast) == 0:
                return nil
            elif len(ast) > 1:
                # Evaluate all but the last form for side effects.
                eval_ast(ast.slice2(1, len(ast)-1), env)
            ast = ast[-1] # Continue loop (TCO)
        elif u"if" == a0sym:
            a1, a2 = ast[1], ast[2]
            cond = EVAL(a1, env)
            # Only nil and false are falsy in mal.
            if cond is nil or cond is false:
                if len(ast) > 3: ast = ast[3] # Continue loop (TCO)
                else: return nil
            else:
                ast = a2 # Continue loop (TCO)
        elif u"fn*" == a0sym:
            a1, a2 = ast[1], ast[2]
            return MalFunc(None, a2, env, a1, EVAL)
        else:
            # Generic function application.
            el = eval_ast(ast, env)
            f = el.values[0]
            if isinstance(f, MalFunc):
                if f.ast:
                    # mal-defined function: loop with its body/env (TCO).
                    ast = f.ast
                    env = f.gen_env(el.rest()) # Continue loop (TCO)
                else:
                    return f.apply(el.rest())
            else:
                raise Exception("%s is not callable" % f)
# print
# print
def PRINT(exp):
    """Render a mal value as a readable string."""
    return printer._pr_str(exp)
# repl
# repl
class MalEval(MalFunc):
    """mal builtin `eval`: evaluates its argument in the REPL environment."""
    def apply(self, args):
        return self.EvalFunc(args[0], self.env)
def entry_point(argv):
    """Program entry: build the REPL environment, optionally run a file
    given as argv[1], otherwise run the interactive read-eval-print loop."""
    repl_env = Env()
    def REP(str, env):
        return PRINT(EVAL(READ(str), env))
    # core.py: defined using python
    for k, v in core.ns.items():
        repl_env.set(_symbol(unicode(k)), MalFunc(v))
    repl_env.set(types._symbol(u'eval'),
                 MalEval(None, env=repl_env, EvalFunc=EVAL))
    # argv[2:] is exposed to mal programs as *ARGV*.
    mal_args = []
    if len(argv) >= 3:
        for a in argv[2:]: mal_args.append(MalStr(unicode(a)))
    repl_env.set(_symbol(u'*ARGV*'), MalList(mal_args))
    # core.mal: defined using the language itself
    REP("(def! not (fn* (a) (if a false true)))", repl_env)
    REP("(def! load-file (fn* (f) (eval (read-string (str \"(do \" (slurp f) \")\")))))", repl_env)
    # Script mode: load the given file and exit.
    if len(argv) >= 2:
        REP('(load-file "' + argv[1] + '")', repl_env)
        return 0
    # Interactive REPL loop; EOF (Ctrl-D) exits.
    while True:
        try:
            line = mal_readline.readline("user> ")
            if line == "": continue
            print(REP(line, repl_env))
        except EOFError as e:
            break
        except reader.Blank:
            # Blank/comment-only input: just re-prompt.
            continue
        except types.MalException as e:
            print(u"Error: %s" % printer._pr_str(e.object, False))
        except Exception as e:
            print("Error: %s" % e)
            #print("".join(traceback.format_exception(*sys.exc_info())))
    return 0
# _____ Define and setup target ___
def target(*args):
    """RPython toolchain hook: return the program's entry point."""
    return entry_point

# Just run entry_point if not RPython compilation
import sys
if not sys.argv[0].endswith('rpython'):
    entry_point(sys.argv)
| mpl-2.0 | -2,057,586,742,310,628,600 | 29.793103 | 99 | 0.50056 | false | 3.336239 | false | false | false | 0.005972 |
fernandoacorreia/DjangoWAWSLogging | DjangoWAWSLogging/env/Lib/site-packages/pywin32-218-py2.7-win32.egg/Demos/RegCreateKeyTransacted.py | 40 | 1819 | import win32api, win32con, win32transaction
# Demo of transacted registry operations (Vista+): keys created or deleted
# inside a transaction are invisible to non-transacted handles until the
# transaction is committed.  Runs against HKEY_CURRENT_USER and cleans up
# after itself.
keyname='Pywin32 test transacted registry functions'
subkeyname='test transacted subkey'
classname='Transacted Class'
trans=win32transaction.CreateTransaction(Description='test RegCreateKeyTransacted')
key, disp=win32api.RegCreateKeyEx(win32con.HKEY_CURRENT_USER, keyname,
	samDesired=win32con.KEY_ALL_ACCESS, Class=classname)
## clean up any existing keys
for subk in win32api.RegEnumKeyExW(key):
	win32api.RegDeleteKey(key, subk[0])
## reopen key in transacted mode
transacted_key=win32api.RegOpenKeyTransacted(Key=win32con.HKEY_CURRENT_USER, SubKey=keyname,
	Transaction=trans, samDesired=win32con.KEY_ALL_ACCESS)
subkey, disp=win32api.RegCreateKeyEx(transacted_key, subkeyname, Transaction=trans,
	samDesired=win32con.KEY_ALL_ACCESS, Class=classname)
## Newly created key should not be visible from non-transacted handle
subkeys=[s[0] for s in win32api.RegEnumKeyExW(key)]
assert subkeyname not in subkeys
transacted_subkeys=[s[0] for s in win32api.RegEnumKeyExW(transacted_key)]
assert subkeyname in transacted_subkeys
## Key should be visible to non-transacted handle after commit
win32transaction.CommitTransaction(trans)
subkeys=[s[0] for s in win32api.RegEnumKeyExW(key)]
assert subkeyname in subkeys
## test transacted delete
del_trans=win32transaction.CreateTransaction(Description='test RegDeleteKeyTransacted')
win32api.RegDeleteKeyEx(key, subkeyname, Transaction=del_trans)
## subkey should still show up for non-transacted handle
subkeys=[s[0] for s in win32api.RegEnumKeyExW(key)]
assert subkeyname in subkeys
## ... and should be gone after commit
win32transaction.CommitTransaction(del_trans)
subkeys=[s[0] for s in win32api.RegEnumKeyExW(key)]
assert subkeyname not in subkeys
# final cleanup of the demo key itself
win32api.RegDeleteKey(win32con.HKEY_CURRENT_USER, keyname)
| mit | 1,724,822,704,044,308,700 | 41.302326 | 92 | 0.812534 | false | 3.120069 | false | false | false | 0.015393 |
seijim/cloud-robotics-azure-platform-v1-sdk | CloudRoboticsApi/ClientCode_Pepper/HeadWaters/PepperCode2/lib/cloudrobotics/client.py | 4 | 6652 | # -*- coding: utf-8 -*-
#
# Cloud Robotics FX クライアント
#
# @author: Hiroki Wakabayashi <hiroki.wakabayashi@jbs.com>
# @version: 0.0.1
import os
import time, datetime
import json
import urllib
import ssl
import base64
import hashlib, hmac
from threading import Thread,Lock
import logging
import paho.mqtt.client as mqtt
from cloudrobotics.message import CRFXMessage
class CRFXClient(object):
    """Cloud Robotics FX client: an MQTT (paho) client for Azure IoT Hub.

    Connects with a SAS token, receives cloud-to-device (C2D) messages and
    publishes device-to-cloud events.  Application code hooks in through the
    on_* callback attributes.  (Python 2 code: uses urllib.quote.)
    """
    def __init__(self, hostname, deviceid, shared_accesskey):
        self.hostname = hostname
        self.deviceid = deviceid
        # Configure the paho MQTT client (MQTT v3.1.1).
        self.mqtt_client = mqtt.Client(client_id=self.deviceid, protocol=mqtt.MQTTv311)
        self.mqtt_client.on_connect = self.__on_connect
        self.mqtt_client.on_disconnect = self.__on_disconnect
        self.mqtt_client.on_message = self.__on_message
        self.mqtt_client.on_publish = self.__on_publish
        # User-assignable callback hooks (set by the application).
        self.on_connect_successful = None
        self.on_connect_failed = None
        self.on_disconnect = None
        self.on_message = None
        self.on_publish = None
        # C2D (cloud-to-device) MQTT topic assigned to this device.
        self.topic = "devices/"+self.deviceid+"/messages/devicebound/#"
        # Generate the SAS token used as the MQTT password.
        sas = self._create_sas_token(self.hostname, self.deviceid, shared_accesskey)
        self.mqtt_client.username_pw_set(username=self.hostname + "/" + self.deviceid, password=sas)
        self.mqtt_client.tls_set(os.path.join(os.path.dirname(__file__), 'cert/ca.cer'), tls_version=ssl.PROTOCOL_TLSv1)
        self.mqtt_port = 8883
        self.lock = Lock()
        self.started = False
        self.seqno = 0
        self.retry_count = 0
        logging.basicConfig()
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)
    # Handler invoked after a connection attempt completes.
    #
    def __on_connect(self, client, userdata, flags, rc):
        if rc == 0:
            # Start receiving C2D messages from the IoT Hub.
            self.mqtt_client.subscribe(self.topic)
            self.logger.info('Succeeded to connect to the Azure IoT Hub.')
            if self.on_connect_successful: self.on_connect_successful()
            # Reset the reconnect retry counter on success.
            self.retry_count = 0
    # Handler invoked after disconnection.
    #
    def __on_disconnect(self, client, userdata, rc):
        if rc != 0:
            self.mqtt_client.disconnect() # stop loop_forever()
            # On abnormal disconnect, retry the connection up to 5 times
            # at 1-second intervals.
            if self.retry_count < 5:
                self.retry_count += 1
                self.logger.error('Failed to connect to the Azure IoT Hub, rc: %d. Trying to reconnect in %d times.', rc, self.retry_count)
                time.sleep(1)
                self.start()
            else:
                self.logger.error("Failed to connect to the Azure IoT Hub even if tried 5 times, gave up reconnecting.")
                if self.on_connect_failed: self.on_connect_failed()
        elif rc == 0 and not self.started:
            if self.on_disconnect: self.on_disconnect()
    # Handler invoked when a message is received.
    #
    def __on_message(self, client, userdata, msg):
        received_message = CRFXMessage()
        received_message.loads(msg.payload)
        self.logger.debug("Received message. header: %s, body: %s", received_message.header, received_message.body)
        if self.on_message: self.on_message(received_message)
    # Handler invoked after a message has been published.
    #
    def __on_publish(self, client, userdata, mid):
        self.logger.debug("Publish message: [%d]", mid)
        if self.on_publish: self.on_publish()
    # Create a Shared Access Signature (SAS) token.
    # Default validity is 20 hours (60*60*20 = 72000 seconds).
    def _create_sas_token(self, hostname, deviceid, shared_accesskey, expire_term=72000):
        expiry = time.mktime(datetime.datetime.now().utctimetuple())+expire_term
        expiry = str(int(expiry))
        # quote() leaves '/' unescaped by default, so pass safe='' to
        # percent-encode everything.
        uri = "{hostname}/devices/{deviceId}".format(hostname=hostname, deviceId=deviceid)
        uri_enc = urllib.quote(uri, safe='')
        signature = uri_enc + '\n' + expiry
        # SharedAccessKey is base64 encoded, so decode it first.
        k = bytes(base64.b64decode(shared_accesskey))
        v = bytes(signature)
        # The signature is computed with HMAC-SHA256.
        sig_enc = base64.b64encode(hmac.new(k, v, digestmod=hashlib.sha256).digest())
        sig_enc = urllib.quote(sig_enc, safe='')
        # Including skn=<keyName> causes an authentication error, so omit it.
        token = 'SharedAccessSignature sr=' + uri_enc + '&sig=' + sig_enc + '&se=' + expiry
        return token
    # Increment the message sequence number.
    #
    def _increment_seq(self):
        with self.lock:
            self.seqno += 1
        # NOTE(review): seqno is read again after the lock is released;
        # under concurrent senders this could return another thread's
        # value -- confirm whether the return should move inside `with`.
        return self.seqno
    # Start the client.
    #
    def start(self):
        try:
            self.mqtt_client.connect(self.hostname, port=self.mqtt_port)
            self.started = True
        except Exception as e:
            self.logger.error("Failed to connect to the Azure IoT Hub: %s, because: %s", self.hostname, str(e))
            self.started = False
            return
        # Run the MQTT network loop in a separate thread.
        thread = Thread(target=self.mqtt_client.loop_forever, args=())
        thread.start()
    # Stop the client.
    #
    def stop(self):
        if not self.started:
            return
        try:
            self.mqtt_client.unsubscribe(self.topic)
            self.mqtt_client.disconnect() # stop loop_forever()
        except Exception as e:
            # Best-effort shutdown: ignore errors from an already-dead link.
            pass
        finally:
            self.started = False
    # Send a message; returns the sequence number used (None on failure
    # before a sequence number was assigned).
    #
    def send_message(self, message):
        seq = None
        try:
            # Assign the next sequence number to the outgoing message.
            seq = self._increment_seq()
            message.set_seq(seq)
            self.logger.debug('send[%d]: %s', seq, message.payload())
            self.mqtt_client.publish('devices/%s/messages/events/' % (self.deviceid), message.payload(), qos=1)
        except Exception as e:
            self.logger.error("Failed to send this message, because: %s", str(e))
        return seq
| mit | -4,475,299,106,572,988,400 | 31 | 139 | 0.602941 | false | 2.944882 | false | false | false | 0.005682 |
adglkh/mail-all-in-one | src/iredadmin/ini/settings.py | 1 | 4042 | ############################################################
# DO NOT TOUCH BELOW LINE.
#
from libs.default_settings import *
import os
############################################################
# General settings.
#
# Site webmaster's mail address.
webmaster = 'zhb@iredmail.org'
# Default language.
default_language = 'en_US'
# Database backend: mysql.
backend = 'mysql'
# Base directory used to store all mail data.
# iRedMail uses '/var/vmail/vmail1' as default storage directory.
# Tip: You can set a per-domain storage directory in domain profile page.
storage_base_directory = os.getenv('SNAP_DATA') + '/var/vmail/vmail1'
# Default mta transport.
# iRedMail uses 'dovecot' as defualt transport.
# Tip: You can set a per-domain or per-user transport in domain or user
# profile page.
default_mta_transport = 'dovecot'
# Min/Max admin password length.
# - min_passwd_length: 0 means unlimited, but at least 1 character
# is required.
# - max_passwd_length: 0 means unlimited.
# User password length is controlled in domain profile.
min_passwd_length = 8
max_passwd_length = 0
#####################################################################
# Database used to store iRedAdmin data. e.g. sessions, log.
#
iredadmin_db_host = 'localhost'
iredadmin_db_port = 3306
iredadmin_db_name = 'iredadmin'
iredadmin_db_user = 'iredadmin'
# MySQL is reached over the snap's unix socket rather than TCP.
iredadmin_unix_socket = os.getenv('SNAP_DATA') + '/mysql/mysql.sock'
# The password is provisioned into a file under $SNAP_DATA by the snap;
# read it at import time and strip the trailing newline.
iredadmin_password_file=os.getenv('SNAP_DATA') + '/mysql/iredadmin_password'
with open(iredadmin_password_file, 'r') as iredpasswd_file:
    iredadmin_db_password = iredpasswd_file.read().replace('\n', '')
############################################
# Database used to store mail accounts.
#
vmail_db_host = 'localhost'
vmail_db_port = 3306
vmail_db_name = 'vmailadmin'
vmail_db_user = 'vmailadmin'
vmail_unix_socket = os.getenv('SNAP_DATA') + '/mysql/mysql.sock'
# Same pattern as above: password file provisioned by the snap.
vmailadmin_password_file=os.getenv('SNAP_DATA') + '/mysql/vmailadmin_password'
with open(vmailadmin_password_file, 'r') as vmailpasswd_file:
    vmail_db_password = vmailpasswd_file.read().replace('\n', '')
##############################################################################
# Settings used for Amavisd-new integration. Provides spam/virus quaranting,
# releasing, etc.
#
# Log basic info of in/out emails into SQL (@storage_sql_dsn): True, False.
# It's @storage_sql_dsn setting in amavisd. You can find this setting
# in amavisd-new config files:
# - On RHEL/CentOS: /etc/amavisd.conf or /etc/amavisd/amavisd.conf
# - On Debian/Ubuntu: /etc/amavis/conf.d/50-user.conf
# - On FreeBSD: /usr/local/etc/amavisd.conf
# Reference:
# http://www.iredmail.org/wiki/index.php?title=IRedMail/FAQ/Integrate.MySQL.in.Amavisd
amavisd_enable_logging = True
amavisd_db_host = '127.0.0.1'
amavisd_db_port = 3306
amavisd_db_name = 'amavisd'
amavisd_db_user = 'amavisd'
amavisd_db_password = 'password'
# #### Quarantining ####
# Release quarantined SPAM/Virus mails: True, False.
# iRedAdmin-Pro will connect to @quarantine_server to release quarantined mails.
# How to enable quarantining in Amavisd-new:
# http://www.iredmail.org/wiki/index.php?title=IRedMail/FAQ/Quarantining.SPAM
amavisd_enable_quarantine = False
# Port of Amavisd protocol 'AM.PDP-INET'. Default is 9998.
amavisd_quarantine_port = 9998
# Enable per-recipient spam policy, white/blacklist.
amavisd_enable_policy_lookup = True
##############################################################################
# Settings used for iRedAPD integration: throttling and more.
#
# Enable iRedAPD integration.
iredapd_enabled = False
# SQL server/port and credential used to connect to iRedAPD SQL database.
iredapd_db_host = '127.0.0.1'
iredapd_db_port = 3306
iredapd_db_name = 'iredapd'
iredapd_db_user = 'iredapd'
iredapd_db_password = 'password'
##############################################################################
# Place your custom settings below, you can override all settings in this file
# and libs/default_settings.py here.
#
| gpl-3.0 | -4,877,565,993,939,063,000 | 33.254237 | 86 | 0.645473 | false | 3.185185 | false | false | false | 0.000742 |
linvictor88/vse-lbaas-driver | quantum/openstack/common/rpc/impl_fake.py | 3 | 5840 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests.
"""
import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time
import eventlet
from quantum.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
class RpcContext(rpc_common.CommonRpcContext):
    """RPC context for the fake driver; accumulates in-process replies."""
    def __init__(self, **kwargs):
        super(RpcContext, self).__init__(**kwargs)
        # (reply, failure) tuples collected via reply().
        self._response = []
        # Set once reply(..., ending=True) is seen; later replies dropped.
        self._done = False

    def deepcopy(self):
        """Copy the context; note _response/_done are shared by reference."""
        values = self.to_dict()
        new_inst = self.__class__(**values)
        new_inst._response = self._response
        new_inst._done = self._done
        return new_inst

    def reply(self, reply=None, failure=None, ending=False):
        """Record a reply (or failure); ending=True closes the stream."""
        if ending:
            self._done = True
        if not self._done:
            self._response.append((reply, failure))
class Consumer(object):
    """In-process consumer: dispatches calls for a topic to a proxy object."""
    def __init__(self, topic, proxy):
        self.topic = topic
        self.proxy = proxy

    def call(self, context, version, method, namespace, args, timeout):
        """Dispatch `method` on the proxy in a greenthread and collect results.

        Returns the list of reply values; raises the dispatched exception
        (or rpc_common.Timeout if `timeout` elapses first).
        """
        done = eventlet.event.Event()

        def _inner():
            ctxt = RpcContext.from_dict(context.to_dict())
            try:
                rval = self.proxy.dispatch(context, version, method,
                                           namespace, **args)
                res = []
                # Caller might have called ctxt.reply() manually
                for (reply, failure) in ctxt._response:
                    if failure:
                        # Re-raise with the original traceback (py2 form).
                        raise failure[0], failure[1], failure[2]
                    res.append(reply)
                # if ending not 'sent'...we might have more data to
                # return from the function itself
                if not ctxt._done:
                    if inspect.isgenerator(rval):
                        for val in rval:
                            res.append(val)
                    else:
                        res.append(rval)
                done.send(res)
            except rpc_common.ClientException as e:
                # Unwrap to the inner (expected) exception.
                done.send_exception(e._exc_info[1])
            except Exception as e:
                done.send_exception(e)

        thread = eventlet.greenthread.spawn(_inner)

        if timeout:
            # Poll once per second; kill the worker if the deadline passes.
            start_time = time.time()
            while not done.ready():
                eventlet.greenthread.sleep(1)
                cur_time = time.time()
                if (cur_time - start_time) > timeout:
                    thread.kill()
                    raise rpc_common.Timeout()
        return done.wait()
class Connection(object):
    """Fake RPC connection that registers consumers in process memory."""

    def __init__(self):
        # Consumers created through this connection, so close() can
        # unregister exactly the ones we added.
        self.consumers = []

    def create_consumer(self, topic, proxy, fanout=False):
        """Register `proxy` as a consumer of `topic` (fanout is ignored)."""
        new_consumer = Consumer(topic, proxy)
        self.consumers.append(new_consumer)
        CONSUMERS.setdefault(topic, []).append(new_consumer)

    def close(self):
        """Unregister every consumer this connection created."""
        for registered in list(self.consumers):
            CONSUMERS[registered.topic].remove(registered)
        self.consumers = []

    def consume_in_thread(self):
        # Nothing to do: the fake driver dispatches synchronously.
        pass
def create_connection(conf, new=True):
    """Create a connection (conf/new accepted for driver API parity)."""
    return Connection()
def check_serialize(msg):
    """Make sure a message intended for rpc can be serialized.

    Uses plain json deliberately (see module note) so non-primitive
    values raise TypeError instead of being silently converted.
    """
    json.dumps(msg)
def multicall(conf, context, topic, msg, timeout=None):
    """Make a call that returns multiple times.

    Dispatches `msg` to the first consumer of `topic` and returns the
    iterable of reply values; yields a single None if no consumer exists.
    """
    check_serialize(msg)

    method = msg.get('method')
    if not method:
        # Messages without a method are silently ignored.
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    try:
        consumer = CONSUMERS[topic][0]
    except (KeyError, IndexError):
        # No consumer registered for the topic.
        return iter([None])
    else:
        return consumer.call(context, version, method, namespace, args,
                             timeout)
def call(conf, context, topic, msg, timeout=None):
    """Send a message on a topic and wait for a single response.

    The call result is the last value produced by multicall(); returns
    None when there are no responses.
    """
    # NOTE(vish): return the last result from the multicall
    responses = list(multicall(conf, context, topic, msg, timeout))
    return responses[-1] if responses else None
def cast(conf, context, topic, msg):
    """Fire-and-forget send: dispatch `msg` and ignore any outcome.

    Cast semantics never surface errors to the caller, so the broad
    except/pass here is deliberate.
    """
    check_serialize(msg)
    try:
        call(conf, context, topic, msg)
    except Exception:
        pass
def notify(conf, context, topic, msg, envelope):
    """Notification stub: only validates that `msg` is serializable."""
    check_serialize(msg)
def cleanup():
    """Nothing to clean up for the in-process fake driver."""
    pass
def fanout_cast(conf, context, topic, msg):
    """Cast to all consumers of a topic; per-consumer errors are ignored
    (cast semantics), and each consumer is called without a timeout."""
    check_serialize(msg)
    method = msg.get('method')
    if not method:
        return
    args = msg.get('args', {})
    version = msg.get('version', None)
    namespace = msg.get('namespace', None)

    for consumer in CONSUMERS.get(topic, []):
        try:
            consumer.call(context, version, method, namespace, args, None)
        except Exception:
            pass
| apache-2.0 | 5,551,312,565,143,124,000 | 28.948718 | 78 | 0.593151 | false | 4.192391 | false | false | false | 0 |
nitzmahone/ansible | lib/ansible/modules/cloud/amazon/aws_az_facts.py | 34 | 3991 | #!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = '''
module: aws_az_facts
short_description: Gather facts about availability zones in AWS.
description:
- Gather facts about availability zones in AWS.
version_added: '2.5'
author: 'Henrique Rodrigues (@Sodki)'
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html) for
possible filters. Filter names and values are case sensitive. You can also use underscores
instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
required: false
default: {}
extends_documentation_fragment:
- aws
- ec2
requirements: [botocore, boto3]
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather facts about all availability zones
- aws_az_facts:
# Gather facts about a single availability zone
- aws_az_facts:
filters:
zone-name: eu-west-1a
'''
RETURN = '''
availability_zones:
returned: on success
description: >
Availability zones that match the provided filters. Each element consists of a dict with all the information
related to that available zone.
type: list
sample: "[
{
'messages': [],
'region_name': 'us-west-1',
'state': 'available',
'zone_name': 'us-west-1b'
},
{
'messages': [],
'region_name': 'us-west-1',
'state': 'available',
'zone_name': 'us-west-1c'
}
]"
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec, boto3_conn
from ansible.module_utils.ec2 import ansible_dict_to_boto3_filter_list, camel_dict_to_snake_dict, HAS_BOTO3
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # will be detected by imported HAS_BOTO3
def main():
    """Ansible module entry: describe EC2 availability zones and exit.

    Reads the `filters` module parameter, queries DescribeAvailabilityZones
    via boto3, and returns the zones as snake_cased dicts.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default={}, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    connection = boto3_conn(
        module,
        conn_type='client',
        resource='ec2',
        region=region,
        endpoint=ec2_url,
        **aws_connect_params
    )

    # Replace filter key underscores with dashes, for compatibility
    sanitized_filters = dict((k.replace('_', '-'), v) for k, v in module.params.get('filters').items())

    try:
        availability_zones = connection.describe_availability_zones(
            Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
        )
    except ClientError as e:
        # AWS-side error: include the (snaked) error response in the failure.
        module.fail_json(msg="Unable to describe availability zones: {0}".format(to_native(e)),
                         exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
    except BotoCoreError as e:
        module.fail_json(msg="Unable to describe availability zones: {0}".format(to_native(e)),
                         exception=traceback.format_exc())

    # Turn the boto3 result into ansible_friendly_snaked_names
    snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]

    module.exit_json(availability_zones=snaked_availability_zones)


if __name__ == '__main__':
    main()
| gpl-3.0 | -3,164,969,291,382,267,000 | 30.928 | 116 | 0.657229 | false | 3.804576 | false | false | false | 0.005011 |
idbedead/RNA-sequence-tools | Tophat_Cluster_submission/test_qsub.py | 2 | 3040 | #/usr/bin/env python
import commands
import os
from subprocess import call
#This is for adjusting parameters on your local system to generate proper file names for qsub
# Local dry-run of the cluster-submission filename logic: walk the sequencing
# data tree, pair up fastq reads per lane, and print the tophat2 command that
# the real qsub script would submit.  Python 2 script (print statements).
path = '/Volumes/Seq_data/werbz'
out= '${TMPDIR}'
annotation_file = '/netapp/home/idriver/genes_E_RS.gtf'
index_gen_loc = '/netapp/home/idriver/mm10_ERCC_RS_bt2/mm10_ERCC_RS/mm10_ERCC_RS'
#this next section parses the file names so that the paired end reads are in order and determines the name of the output file
#use test_qsub.py to test and modify this section locally to work for your file names
pathlist = []
for root, dirs, files in os.walk(path):
    if root.split('/')[-1]=='werbz':
        for lane in dirs:
            samp_file_name = 'results_lib_'+lane.split('-')[-1]
            #call('mkdir -p /netapp/home/idriver/%s' % result_name, shell=True)
    elif dirs == ['fastqc']:
        # Leaf sample directory: derive the sample/result names from the path.
        n = root.strip('/').split('/')[-1]
        out= '${TMPDIR}'
        print n
        name1 = n.split('_')[1]
        name = '_'.join(name1.split('-')[1:])
        result_file_name = 'results_lib_'+name.split('_')[1]
        print name, result_file_name
        data_file = root
        result_file = os.path.join(out,name)
        input_files=''
        r_num = []
        # Collect fastq files and build a sortable rank from the read number
        # (R1/R2) and part number embedded in the filename.
        # NOTE(review): assumes names like <a>_<b>_<c>_R1_001.fastq — confirm
        # against the actual sequencer output before reuse.
        for f in files:
            if 'fastq' in f and ".txt" not in f:
                f_split = f.split('_')
                r_name = (f_split[3][1])
                en_split = f_split[4].split('.')
                p_num = en_split[0].strip('00')
                rank = r_name+p_num
                r_num.append(int(rank))
                input_files+=os.path.join(root,f)+' '
        in_split = input_files.split(' ')
        sort_num = [x for (y,x) in sorted(zip(r_num,in_split))]
        # Build tophat's "R1a,R1b R2a,R2b" paired-read argument: comma within
        # a mate, a space between mates (the midpoint splits the two mates).
        if len(in_split) > 2:
            name_build = ''
            for i, mul_f in enumerate(sort_num):
                if 'fastq' in mul_f:
                    if i == len(in_split)-1:
                        name_build+=mul_f
                    elif i < (len(in_split)/2)-1 or i > (len(in_split)/2)-1:
                        name_build+= mul_f+','
                    elif i == (len(in_split)/2)-1:
                        name_build+= mul_f+' '
            final_files = name_build.strip(',')
        elif len(in_split) == 2:
            try:
                final_files = sort_num[0]+' '+sort_num[1].strip(',')
            except IndexError:
                print 'Incomplete File: '+name
        # Commands that the cluster job would run (only tophat_cmd is printed).
        tophat_cmd = 'tophat2 -p 8 -r 230 -a 30 --read-realign-edit-dist 0 -G '+annotation_file+' --transcriptome-index=/netapp/home/idriver/transcriptome_data_mm10_RS/known_e_RS -o '+result_file+' '+index_gen_loc+' '+final_files
        samtools_cmd = 'samtools sort '+result_file+'/'+'accepted_hits.bam accepted_hits_sorted'
        cufflinks_cmd = 'cufflinks -p 8 --max-bundle-frags 10000000 -G '+annotation_file+' -o '+result_file+' '+result_file+'/'+'accepted_hits.bam'
        cuffquant_cmd = 'cuffquant -p 8 --max-bundle-frags 10000000 -o '+result_file+' '+annotation_file+' '+result_file+'/'+'accepted_hits.bam'
        print tophat_cmd
| mit | 6,435,275,208,027,718,000 | 46.5 | 229 | 0.554276 | false | 3.29718 | false | false | false | 0.009868 |
mapennell/ansible | cloud/misc/virt.py | 8 | 14024 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Virt management features
Copyright 2007, 2012 Red Hat, Inc
Michael DeHaan <michael.dehaan@gmail.com>
Seth Vidal <skvidal@fedoraproject.org>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: virt
short_description: Manages virtual machines supported by libvirt
description:
- Manages virtual machines supported by I(libvirt).
version_added: "0.2"
options:
name:
description:
- name of the guest VM being managed. Note that VM must be previously
defined with xml.
required: true
default: null
aliases: []
state:
description:
- Note that there may be some lag for state requests like C(shutdown)
since these refer only to VM states. After starting a guest, it may not
be immediately accessible.
required: false
choices: [ "running", "shutdown", "destroyed", "paused" ]
default: "no"
command:
description:
- in addition to state management, various non-idempotent commands are available. See examples
required: false
choices: ["create","status", "start", "stop", "pause", "unpause",
"shutdown", "undefine", "destroy", "get_xml", "autostart",
"freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
uri:
description:
- libvirt connection uri
required: false
defaults: qemu:///system
xml:
description:
- XML document used with the define command
required: false
default: null
requirements:
- "python >= 2.6"
- "libvirt-python"
author:
- "Ansible Core Team"
- '"Michael DeHaan (@mpdehaan)" <michael.dehaan@gmail.com>'
- '"Seth Vidal (@skvidal)" <skvidal@fedoraproject.org>'
'''
EXAMPLES = '''
# a playbook task line:
- virt: name=alpha state=running
# /usr/bin/ansible invocations
ansible host -m virt -a "name=alpha command=status"
ansible host -m virt -a "name=alpha command=get_xml"
ansible host -m virt -a "name=alpha command=create uri=lxc:///"
# a playbook example of defining and launching an LXC guest
tasks:
- name: define vm
virt: name=foo
command=define
xml="{{ lookup('template', 'container-template.xml.j2') }}"
uri=lxc:///
- name: start vm
virt: name=foo state=running uri=lxc:///
'''
VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE=2
import sys
try:
import libvirt
except ImportError:
print "failed=True msg='libvirt python module unavailable'"
sys.exit(1)
ALL_COMMANDS = []
VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)
VIRT_STATE_NAME_MAP = {
0 : "running",
1 : "running",
2 : "running",
3 : "paused",
4 : "shutdown",
5 : "shutdown",
6 : "crashed"
}
# Raised by LibvirtConnection.find_vm() when no domain matches the request.
class VMNotFound(Exception):
    pass
class LibvirtConnection(object):
    """Thin wrapper over a libvirt connection handle.

    Opens the hypervisor connection on construction and exposes the
    per-domain operations used by the Virt facade below.
    """

    def __init__(self, uri, module):
        self.module = module

        # On a Xen dom0 the kernel release string contains "xen"; libvirt then
        # expects the default (None) URI instead of the user-supplied one.
        cmd = "uname -r"
        rc, stdout, stderr = self.module.run_command(cmd)

        if "xen" in stdout:
            conn = libvirt.open(None)
        else:
            conn = libvirt.open(uri)

        if not conn:
            raise Exception("hypervisor connection failure")

        self.conn = conn

    def find_vm(self, vmid):
        """
        Return the domain object named *vmid*.

        Extra bonus feature: vmid = -1 returns a list of everything
        (running and defined-but-inactive domains).
        """
        conn = self.conn

        vms = []

        # this block of code borrowed from virt-manager:
        # get working domain's name
        ids = conn.listDomainsID()
        for id in ids:
            vm = conn.lookupByID(id)
            vms.append(vm)
        # get defined domain
        names = conn.listDefinedDomains()
        for name in names:
            vm = conn.lookupByName(name)
            vms.append(vm)

        if vmid == -1:
            return vms

        for vm in vms:
            if vm.name() == vmid:
                return vm

        raise VMNotFound("virtual machine %s not found" % vmid)

    def shutdown(self, vmid):
        """Request a graceful guest shutdown."""
        return self.find_vm(vmid).shutdown()

    def pause(self, vmid):
        # BUG FIX: previously called self.suspend(self.conn, vmid), passing the
        # raw connection object as the vmid (and one argument too many), which
        # always raised TypeError.  suspend() takes only the vmid.
        return self.suspend(vmid)

    def unpause(self, vmid):
        # BUG FIX: same extra-argument problem as pause() above.
        return self.resume(vmid)

    def suspend(self, vmid):
        """Suspend (pause) the guest in place."""
        return self.find_vm(vmid).suspend()

    def resume(self, vmid):
        """Resume a suspended guest."""
        return self.find_vm(vmid).resume()

    def create(self, vmid):
        """Start a defined (inactive) guest."""
        return self.find_vm(vmid).create()

    def destroy(self, vmid):
        """Hard power-off the guest (no graceful shutdown)."""
        return self.find_vm(vmid).destroy()

    def undefine(self, vmid):
        """Remove the guest's libvirt definition."""
        return self.find_vm(vmid).undefine()

    def get_status2(self, vm):
        """Map an already-fetched domain object's state to a status string."""
        state = vm.info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def get_status(self, vmid):
        """Map the named guest's state to a status string."""
        state = self.find_vm(vmid).info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def nodeinfo(self):
        """Return host (hypervisor node) hardware information."""
        return self.conn.getInfo()

    def get_type(self):
        """Return the hypervisor driver type (e.g. QEMU, Xen)."""
        return self.conn.getType()

    def get_xml(self, vmid):
        """Return the guest's libvirt XML description."""
        vm = self.conn.lookupByName(vmid)
        return vm.XMLDesc(0)

    def get_maxVcpus(self, vmid):
        """Return the maximum number of virtual CPUs of the guest."""
        vm = self.conn.lookupByName(vmid)
        return vm.maxVcpus()

    def get_maxMemory(self, vmid):
        """Return the maximum memory of the guest."""
        vm = self.conn.lookupByName(vmid)
        return vm.maxMemory()

    def getFreeMemory(self):
        """Return the host's free memory."""
        return self.conn.getFreeMemory()

    def get_autostart(self, vmid):
        """Return whether the guest autostarts with the host."""
        vm = self.conn.lookupByName(vmid)
        return vm.autostart()

    def set_autostart(self, vmid, val):
        """Set the guest's autostart flag to *val*."""
        vm = self.conn.lookupByName(vmid)
        return vm.setAutostart(val)

    def define_from_xml(self, xml):
        """Define a new guest from the given XML description."""
        return self.conn.defineXML(xml)
class Virt(object):
    """High-level facade mapping module commands onto LibvirtConnection calls.

    Each public method (re)opens a libvirt connection via __get_conn()
    before performing its operation.
    """

    def __init__(self, uri, module):
        self.module = module
        self.uri = uri

    def __get_conn(self):
        # Open a fresh connection for this operation and cache it on self.
        self.conn = LibvirtConnection(self.uri, self.module)
        return self.conn

    def get_vm(self, vmid):
        """Return the libvirt domain object for *vmid*."""
        self.__get_conn()
        return self.conn.find_vm(vmid)

    def state(self):
        """Return a list of '<name> <state>' strings for all domains."""
        vms = self.list_vms()
        state = []
        for vm in vms:
            state_blurb = self.conn.get_status(vm)
            state.append("%s %s" % (vm, state_blurb))
        return state

    def info(self):
        """Return a dict of per-domain facts (state, memory, vcpus, ...)."""
        vms = self.list_vms()
        info = dict()
        for vm in vms:
            data = self.conn.find_vm(vm).info()
            # libvirt returns maxMem, memory, and cpuTime as long()'s, which
            # xmlrpclib tries to convert to regular int's during serialization.
            # This throws exceptions, so convert them to strings here and
            # assume the other end of the xmlrpc connection can figure things
            # out or doesn't care.
            info[vm] = {
                "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
                "maxMem" : str(data[1]),
                "memory" : str(data[2]),
                "nrVirtCpu" : data[3],
                "cpuTime" : str(data[4]),
            }
            info[vm]["autostart"] = self.conn.get_autostart(vm)

        return info

    def nodeinfo(self):
        """Return hypervisor host information (CPU/memory topology)."""
        self.__get_conn()
        info = dict()
        data = self.conn.nodeinfo()
        info = {
            "cpumodel" : str(data[0]),
            "phymemory" : str(data[1]),
            "cpus" : str(data[2]),
            "cpumhz" : str(data[3]),
            "numanodes" : str(data[4]),
            "sockets" : str(data[5]),
            "cpucores" : str(data[6]),
            "cputhreads" : str(data[7])
        }
        return info

    def list_vms(self, state=None):
        """List domain names, optionally filtered to the given state string."""
        self.conn = self.__get_conn()
        vms = self.conn.find_vm(-1)
        results = []
        for x in vms:
            try:
                if state:
                    vmstate = self.conn.get_status2(x)
                    if vmstate == state:
                        results.append(x.name())
                else:
                    results.append(x.name())
            except:
                # A domain can disappear between listing and querying it.
                pass
        return results

    def virttype(self):
        """Return the virtualization type reported by the hypervisor."""
        return self.__get_conn().get_type()

    def autostart(self, vmid):
        """Mark the guest to start automatically when the host boots."""
        self.conn = self.__get_conn()
        return self.conn.set_autostart(vmid, True)

    def freemem(self):
        """Return the amount of free memory on the host."""
        self.conn = self.__get_conn()
        return self.conn.getFreeMemory()

    def shutdown(self, vmid):
        """ Make the machine with the given vmid stop running.  Whatever that takes. """
        self.__get_conn()
        self.conn.shutdown(vmid)
        return 0

    def pause(self, vmid):
        """ Pause the machine with the given vmid. """
        self.__get_conn()
        return self.conn.suspend(vmid)

    def unpause(self, vmid):
        """ Unpause the machine with the given vmid. """
        self.__get_conn()
        return self.conn.resume(vmid)

    def create(self, vmid):
        """ Start the machine via the given vmid """
        self.__get_conn()
        return self.conn.create(vmid)

    def start(self, vmid):
        """ Start the machine via the given id/name """
        self.__get_conn()
        return self.conn.create(vmid)

    def destroy(self, vmid):
        """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down.  """
        self.__get_conn()
        return self.conn.destroy(vmid)

    def undefine(self, vmid):
        """ Stop a domain, and then wipe it from the face of the earth.  (delete disk/config file) """
        self.__get_conn()
        return self.conn.undefine(vmid)

    def status(self, vmid):
        """
        Return a state suitable for server consumption.  Aka, codes.py values, not XM output.
        """
        self.__get_conn()
        return self.conn.get_status(vmid)

    def get_xml(self, vmid):
        """
        Receive a Vm id as input
        Return an xml describing vm config returned by a libvirt call
        """
        self.__get_conn()
        return self.conn.get_xml(vmid)

    def get_maxVcpus(self, vmid):
        """
        Gets the max number of VCPUs on a guest
        """
        self.__get_conn()
        return self.conn.get_maxVcpus(vmid)

    def get_max_memory(self, vmid):
        """
        Gets the max memory on a guest
        """
        self.__get_conn()
        # BUG FIX: was self.conn.get_MaxMemory(vmid); LibvirtConnection only
        # defines get_maxMemory(), so the old call raised AttributeError.
        return self.conn.get_maxMemory(vmid)

    def define(self, xml):
        """
        Define a guest with the given xml
        """
        self.__get_conn()
        return self.conn.define_from_xml(xml)
def core(module):
    """Dispatch on the module parameters and return (rc, result_dict).

    Handles the idempotent ``state=`` requests first, then the one-shot
    ``command=`` requests; fails the module for anything unrecognized.
    """

    state      = module.params.get('state', None)
    guest      = module.params.get('name', None)
    command    = module.params.get('command', None)
    uri        = module.params.get('uri', None)
    xml        = module.params.get('xml', None)

    v = Virt(uri, module)
    res = {}

    if state and command=='list_vms':
        res = v.list_vms(state=state)
        if type(res) != dict:
            res = { command: res }
        return VIRT_SUCCESS, res

    if state:
        if not guest:
            module.fail_json(msg = "state change requires a guest specified")

        res['changed'] = False
        # BUG FIX: the status comparisons below used `is`/`is not` against
        # string literals, which compares identity, not equality, and is
        # implementation-dependent.  Use ==/!= instead.
        if state == 'running':
            if v.status(guest) == 'paused':
                res['changed'] = True
                res['msg'] = v.unpause(guest)
            elif v.status(guest) != 'running':
                res['changed'] = True
                res['msg'] = v.start(guest)
        elif state == 'shutdown':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.shutdown(guest)
        elif state == 'destroyed':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.destroy(guest)
        elif state == 'paused':
            if v.status(guest) == 'running':
                res['changed'] = True
                res['msg'] = v.pause(guest)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in VM_COMMANDS:
            if not guest:
                module.fail_json(msg = "%s requires 1 argument: guest" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg = "define requires xml argument")
                try:
                    v.get_vm(guest)
                except VMNotFound:
                    # Only define the guest if it does not exist yet.
                    v.define(xml)
                    res = {'changed': True, 'created': guest}
                return VIRT_SUCCESS, res
            res = getattr(v, command)(guest)
            if type(res) != dict:
                res = { command: res }
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            res = getattr(v, command)()
            if type(res) != dict:
                res = { command: res }
            return VIRT_SUCCESS, res

        else:
            # BUG FIX: the error message referenced the undefined name
            # `basecmd`, raising NameError instead of reporting the problem.
            module.fail_json(msg="Command %s not recognized" % command)

    module.fail_json(msg="expected state or command parameter to be specified")
def main():
    # Build the argument spec, run core(), and translate its (rc, result)
    # into the module's exit_json/fail_json response.
    module = AnsibleModule(argument_spec=dict(
        name = dict(aliases=['guest']),
        state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
        command = dict(choices=ALL_COMMANDS),
        uri = dict(default='qemu:///system'),
        xml = dict(),
    ))

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception, e:  # Python 2 except syntax; this module predates py3.
        module.fail_json(msg=str(e))

    if rc != 0: # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 5,158,236,212,317,435,000 | 27.160643 | 116 | 0.558115 | false | 3.767867 | false | false | false | 0.005205 |
kevinmel2000/sl4a | python/build.py | 20 | 6064 | #!/usr/bin/python
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import compileall
import glob
import os
import re
import subprocess
import shutil
import sys
import zipfile
def run(cmd, exit=True, cwd=None):
  """Echo and execute *cmd* (split on whitespace) in directory *cwd*.

  On a non-zero exit status either abort the whole build (exit=True)
  or report and continue.  NOTE: the parameter `exit` shadows the builtin.
  """
  print cmd
  if subprocess.Popen(cmd.split(), cwd=cwd).wait() != 0:
    if exit:
      print 'Failed!'
      sys.exit(1)
    else:
      print 'Ignoring failure.'
def find(directory, pattern=None, exclude=None):
  """Walk *directory* and return (matches, misses).

  *matches* are paths whose full path matches the regex *pattern*
  (everything, when pattern is None); *misses* are the rest.  Basenames
  listed in *exclude* are skipped, and excluded directories are pruned
  from the walk entirely.
  """
  print 'Looking for paths in %r matching %r' % (directory, pattern)
  matches = []
  misses = []
  if exclude is None:
    exclude = []
  directory = os.path.abspath(directory)
  for root, dirs, files in os.walk(directory):
    for basename in dirs + files:
      if basename in exclude:
        if basename in dirs:
          # Prune so os.walk never descends into the excluded directory.
          dirs.remove(basename)
        continue
      path = os.path.join(root, basename)
      if pattern is None or re.search(pattern, path):
        matches.append(path)
      else:
        misses.append(path)
  print 'Found %d matches and %d misses' % (len(matches), len(misses))
  return matches, misses
def rm(path):
  """Delete *path* (file or directory tree), ignoring OSError."""
  print 'Deleting %r' % path
  try:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
  except OSError:
    # Missing paths and permission problems are deliberately best-effort.
    pass
def strip(path):
  # Strip symbols from a native binary with the Android ARM toolchain.
  run('arm-eabi-strip %s' % path)
def zipup(out_path, in_path, top, exclude=None, prefix=''):
  """Zip everything under *in_path* into *out_path*.

  Archive names are the paths relative to *top*, prefixed with *prefix*;
  basenames in *exclude* are skipped (see find()).
  """
  zip_file = zipfile.ZipFile(out_path, 'w', compression=zipfile.ZIP_DEFLATED)
  for path in find(in_path, exclude=exclude)[0]:
    if not os.path.isdir(path):
      arcname = prefix + path[len(top):].lstrip('/')
      print 'Adding %s to %s' % (arcname, out_path)
      zip_file.write(path, arcname)
  zip_file.close()
# Main build sequence (Python 2 script): locate the Android toolchain, build
# the interpreter, bundle third-party pure-Python libraries, prune the
# installation, and zip the results for deployment.

# Find Android source path and put it in the environment.
gcc_path = subprocess.Popen(['which', 'arm-eabi-gcc'],
                            stdout=subprocess.PIPE).communicate()[0]
match = re.match(r'(.*)/prebuilt', gcc_path)
if match is None:
  print 'Could not find arm-eabi-gcc on your path.'
  sys.exit(1)
android_src = match.group(1)
os.environ['ANDROID_SRC'] = android_src
os.environ['SL4A_TRUNK'] = os.path.abspath('..');
print os.environ['SL4A_TRUNK']
agcc_path = subprocess.Popen(['which', 'agcc'],
                             stdout=subprocess.PIPE).communicate()[0]
if agcc_path == '':
  print 'Could not find agcc on your path.'
  sys.exit(1)

# Build the interpreter inside src/ (Makefile installs into src/android/).
pwd = os.getcwd()
os.chdir('src')
assert os.path.exists('Parser/hostpgen'), 'hostpgen not found'
run('make')
#run('make install -k', False)
run('make install')
assert os.path.exists('android'), 'build result not found'

# Bundle pure-Python third-party libraries into the installed stdlib tree.
print 'Installing xmppy.'
xmpppy_path = os.path.join(pwd, 'xmpppy', 'xmpp')
compileall.compile_dir(xmpppy_path)
shutil.copytree(xmpppy_path, 'android/python/lib/python2.6/xmpp')

print 'Installing BeautifulSoup.'
beautifulsoup_path = os.path.join(pwd, 'BeautifulSoup')
compileall.compile_dir(beautifulsoup_path)
shutil.copy(os.path.join(beautifulsoup_path, 'BeautifulSoup.pyc'),
            'android/python/lib/python2.6/BeautifulSoup.pyc')

print 'Installing gdata.'
gdata_path = os.path.join(pwd, 'gdata')
run('python setup.py build', cwd=gdata_path)
gdata_build_path = os.path.join(gdata_path, 'build')
# NOTE(review): assumes build/ contains exactly one platform directory.
gdata_result_path = os.path.join(gdata_build_path,
                                 os.listdir(gdata_build_path)[0])
compileall.compile_dir(gdata_result_path)
shutil.copytree(os.path.join(gdata_result_path, 'gdata'),
                'android/python/lib/python2.6/gdata')
shutil.copytree(os.path.join(gdata_result_path, 'atom'),
                'android/python/lib/python2.6/atom')

print 'Installing python-twitter.'
twitter_path = os.path.join(pwd, 'python-twitter')
compileall.compile_dir(twitter_path)
shutil.copy(os.path.join(twitter_path, 'twitter.pyc'),
            'android/python/lib/python2.6/twitter.pyc')

print 'Installing simplejson.'
simplejson_path = os.path.join(pwd, 'python-twitter', 'simplejson')
compileall.compile_dir(simplejson_path)
shutil.copytree(simplejson_path, 'android/python/lib/python2.6/simplejson')

# Shrink the install: keep only .pyc files and needed shared objects.
print 'Removing unecessary files and directories from installation.'
map(rm, find('android/python/bin', 'python$')[1])
map(rm, find('android', '\.py$')[0])
map(rm, find('android', '\.c$')[0])
map(rm, find('android', 'test')[0])
map(rm, find('android', '\.pyo$')[0])
rm('android/python/share')
rm('android/python/include')
rm('android/python/lib/libpython2.6.a')
map(strip, find('android', '\.so$')[0])
strip('android/python/bin/python')

libs_to_remove = [
    'compiler',
    'config',
    'curses',
    'distutils',
    'hotshot',
    'idlelib',
    'lib2to3',
    'lib-old',
    'lib-tk',
    'multiprocessing',
    'site-packages',
    ]
for lib in libs_to_remove:
  rm('android/python/lib/python2.6/'+lib)

# Remove any existing zip files.
for p in glob.glob(os.path.join(pwd, '*.zip')):
  rm(p)

print 'Zipping up standard library.'
libs = os.path.join(pwd, 'src/android/python/lib/python2.6')
# Copy in ASE's Android module.
shutil.copy(os.path.join(pwd, 'ase', 'android.py'),
            'android/python/lib/python2.6')
zipup(os.path.join(pwd, 'python_extras.zip'), libs, libs,
      exclude=['lib-dynload'], prefix='python/')
map(rm, find(libs, exclude=['lib-dynload'])[0])

print 'Zipping up Python interpreter for deployment.'
zipup(os.path.join(pwd, 'python.zip'),
      os.path.join(pwd, 'src', 'android', 'python'),
      os.path.join(pwd, 'src', 'android'))

print 'Zipping up Python scripts.'
zipup(os.path.join(pwd, 'python_scripts.zip'),
      os.path.join(pwd, 'ase', 'scripts'),
      os.path.join(pwd, 'ase', 'scripts'))

print 'Done.'
| apache-2.0 | -6,349,011,617,173,738,000 | 29.32 | 79 | 0.670679 | false | 3.183202 | false | false | false | 0.006926 |
lutraconsulting/qgis-moor-tools-plugin | Project Selector/settingsdialog.py | 1 | 4375 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
ProjectSelectorDialog
A QGIS plugin
Tool for selecting pre-defined QGIS projects.
-------------------
begin : 2013-12-04
copyright : (C) 2013 by Dartmoor National Park Authority
email : gi@dartmoor.gov.uk
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from qgis.PyQt import QtCore, uic
from qgis.PyQt.QtWidgets import QDialog, QFileDialog, QMessageBox
# create the dialog for zoom to point
PARENT_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULTS = os.path.join(PARENT_DIR, 'defaults.txt')
ui_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui_settings.ui')
class SettingsDialog(QDialog):
    """Dialog for configuring project/template folders and selector options.

    Folder paths are persisted both to the plugin's defaults.txt file and
    (enable flags) to QSettings.
    """

    def __init__(self):
        QDialog.__init__(self)
        # Set up the user interface from Designer.
        self.ui = uic.loadUi(ui_file, self)
        self.settings = QtCore.QSettings()

        # Populate the values
        # defaults.txt format: one "label:path" line for projects, one for
        # templates, in that order.
        with open(DEFAULTS) as paths:
            projects = paths.readline().strip().split(':', 1)[-1]
            templates = paths.readline().strip().split(':', 1)[-1]
        self.ui.projectsFolderLineEdit.setText(projects)
        self.ui.templateRootLineEdit.setText(templates)

        project_selector_enabled = self.settings.value("SelectorTools/ProjectSelector/isEnabled", True, type=bool)
        identifiable_only = self.settings.value("SelectorTools/ProjectSelector/identifiableOnly", True, type=bool)
        self.ui.projectSelectorEnabledCheckBox.setChecked(project_selector_enabled)
        self.ui.identifiableOnly.setChecked(identifiable_only)

    def browseForProjectRoot(self):
        """Let the user pick the projects folder; reject drive/filesystem roots."""
        startingDir = str(self.settings.value("SelectorTools/ProjectSelector/projectRoot", os.path.expanduser("~"), type=str))
        d = str(QFileDialog.getExistingDirectory(None, 'Select Projects Folder', startingDir))
        if d != os.sep and d.lower() != 'c:\\' and d != '':
            self.ui.projectsFolderLineEdit.setText(d)

    def browseForTemplateRoot(self):
        """Let the user pick the template root; reject drive/filesystem roots."""
        startingDir = str(self.settings.value("SelectorTools/TemplateSelector/templateRoot", os.path.expanduser("~"), type=str))
        d = str(QFileDialog.getExistingDirectory(None, 'Select Root of Template Folder Structure', startingDir))
        if d != os.sep and d.lower() != 'c:\\' and d != '':
            self.ui.templateRootLineEdit.setText(d)

    def accept(self):
        """Persist the dialog values to QSettings and defaults.txt, then close."""
        projects = self.ui.projectsFolderLineEdit.text()
        self.settings.setValue("SelectorTools/ProjectSelector/projectRoot", projects)
        templates = self.ui.templateRootLineEdit.text()
        self.settings.setValue("SelectorTools/TemplateSelector/templateRoot", templates)
        try:
            with open(DEFAULTS, 'w') as paths:
                paths.write('projects:{}\n'.format(projects))
                paths.write('templates:{}\n'.format(templates))
        except IOError:
            # Settings in QSettings are still saved; only the file write failed.
            QMessageBox.warning(None, \
                'Could not save folders', \
                '%s could not be opened for writing, please ensure you have permission to edit this file.' \
                % DEFAULTS )
        project_selector_enabled = self.ui.projectSelectorEnabledCheckBox.isChecked()
        self.settings.setValue("SelectorTools/ProjectSelector/isEnabled", project_selector_enabled)
        identifiable_only = self.ui.identifiableOnly.isChecked()
        self.settings.setValue("SelectorTools/ProjectSelector/identifiableOnly", identifiable_only)
        QDialog.accept(self)
| gpl-2.0 | 2,809,398,518,857,308,700 | 50.470588 | 130 | 0.568229 | false | 4.639449 | false | false | false | 0.004571 |
invisiblek/python-for-android | python3-alpha/python3-src/Lib/macpath.py | 57 | 5617 | """Pathname and path-related operations for the Macintosh."""
import os
from stat import *
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"expanduser","expandvars","normpath","abspath",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
# These are primarily for export; internally, they are hardcoded.
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
def _get_colon(path):
if isinstance(path, bytes):
return b':'
else:
return ':'
# Normalize the case of a pathname: on the Mac this is simply lower-casing.
def normcase(path):
    """Return *path* lower-cased; raise TypeError for non-str/bytes input."""
    if isinstance(path, (bytes, str)):
        return path.lower()
    raise TypeError("normcase() argument must be str or bytes, "
                    "not '{}'".format(path.__class__.__name__))
def isabs(s):
    """Return true if a path is absolute.

    On the Mac, relative paths begin with a colon, but as a special case,
    paths with no colons at all are also relative.  Anything else is
    absolute (the string up to the first colon is the volume name).
    """
    colon = b':' if isinstance(s, bytes) else ':'
    if colon not in s:
        return False
    return s[:1] != colon
def join(s, *p):
    """Join Mac path components; an absolute component resets the result."""
    colon = b':' if isinstance(s, bytes) else ':'
    result = s
    for part in p:
        # NOTE: mirrors the historical behavior of testing the ORIGINAL
        # first argument `s` (not the accumulated result) for emptiness.
        if (not s) or (colon in part and part[:1] != colon):
            result = part
            continue
        if part[:1] == colon:
            part = part[1:]
        if colon not in result:
            result = colon + result
        if result[-1:] != colon:
            result = result + colon
        result = result + part
    return result
def split(s):
    """Split a pathname into two parts: the directory leading up to the final
    bit, and the basename (the filename, without colons, in that directory).
    The result (s, t) is such that join(s, t) yields the original argument."""
    colon = b':' if isinstance(s, bytes) else ':'
    if colon not in s:
        return s[:0], s
    # Locate the position just past the last colon.
    last = 0
    for i in range(len(s)):
        if s[i:i+1] == colon:
            last = i + 1
    head, tail = s[:last-1], s[last:]
    if head and colon not in head:
        head = head + colon
    return head, tail
def splitext(p):
    # The module's separators are hardcoded internally (see the comment at
    # the top of this file): sep=':', altsep=None, extsep='.'.
    if isinstance(p, bytes):
        return genericpath._splitext(p, b':', None, b'.')
    return genericpath._splitext(p, ':', None, '.')
splitext.__doc__ = genericpath._splitext.__doc__
def splitdrive(p):
    """Split a pathname into a drive specification and the rest of the
    path.  Useful on DOS/Windows/NT; on the Mac, the drive is always
    empty (don't use the volume name -- it doesn't have the same
    syntactic and semantic oddities as DOS drive letters, such as there
    being a separate current directory per drive)."""
    drive = p[:0]  # empty string of the same type as p
    return drive, p
# Short interfaces to split()

def dirname(s):
    """Return the head component of split(s)."""
    return split(s)[0]

def basename(s):
    """Return the tail component of split(s)."""
    return split(s)[1]
def ismount(s):
    """Return True when *s* is an absolute path whose final component is empty
    (i.e. it names only a volume root)."""
    if not isabs(s):
        return False
    head, tail = split(s)
    return not tail
def islink(s):
    """Return true if the pathname refers to a symbolic link."""
    # Carbon only exists on classic Mac builds; any failure (including the
    # ImportError everywhere else) means "not a link".
    try:
        import Carbon.File
        resolved = Carbon.File.ResolveAliasFile(s, 0)
    except:
        return False
    return resolved[2]
# Like exists(), but based on lstat() so broken symlinks still count.

def lexists(path):
    """Test whether a path exists.  Returns True for broken symbolic links"""
    try:
        os.lstat(path)
    except os.error:
        return False
    return True
def expandvars(path):
    """Dummy to retain interface-compatibility with other operating systems."""
    # Classic Mac OS paths have no $VARIABLE syntax: identity function.
    return path
def expanduser(path):
    """Dummy to retain interface-compatibility with other operating systems."""
    # Classic Mac OS paths have no ~user syntax: identity function.
    return path
# Raised by normpath() for paths (e.g. "Volume::x") that have no normal form.
class norm_error(Exception):
    """Path cannot be normalized"""
def normpath(s):
    """Normalize a pathname.  Will return the same result for
    equivalent paths."""
    colon = b':' if isinstance(s, bytes) else ':'
    if colon not in s:
        # Paths without colons are relative; make that explicit.
        return colon + s

    parts = s.split(colon)
    i = 1
    while i < len(parts) - 1:
        if not parts[i] and parts[i-1]:
            # An empty component means "parent": drop it with its predecessor.
            if i > 1:
                del parts[i-1:i+1]
                i = i - 1
            else:
                # best way to handle this is to raise an exception
                raise norm_error('Cannot use :: immediately after volume name')
        else:
            i = i + 1

    s = colon.join(parts)

    # remove trailing ":" except for ":" and "Volume:"
    if s[-1:] == colon and len(parts) > 2 and s != colon * len(s):
        s = s[:-1]
    return s
def abspath(path):
    """Return an absolute version of *path* (normalized)."""
    if not isabs(path):
        cwd = os.getcwdb() if isinstance(path, bytes) else os.getcwd()
        path = join(cwd, path)
    return normpath(path)
# realpath is a no-op on systems without islink support
def realpath(path):
    path = abspath(path)
    try:
        import Carbon.File
    except ImportError:
        # No alias resolution available: the absolute path is as real as
        # it gets.
        return path
    if not path:
        return path
    colon = b':' if isinstance(path, bytes) else ':'
    parts = path.split(colon)
    path = parts[0] + colon
    # Re-join one component at a time, resolving aliases along the way.
    for component in parts[1:]:
        path = join(path, component)
        try:
            path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
        except Carbon.File.Error:
            pass
    return path
supports_unicode_filenames = True
| apache-2.0 | 7,293,514,179,746,127,000 | 26.4 | 79 | 0.598006 | false | 3.774866 | false | false | false | 0.007477 |
qingqing01/models | generate_chinese_poetry/generate.py | 4 | 3580 | import os
import sys
import gzip
import logging
import numpy as np
import click
import reader
import paddle.v2 as paddle
from paddle.v2.layer import parse_network
from network_conf import encoder_decoder_network
logger = logging.getLogger("paddle")
logger.setLevel(logging.WARNING)
def infer_a_batch(inferer, test_batch, beam_size, id_to_text, fout):
    """Run beam-search inference on one batch and write decoded text to fout.

    For every sample, writes the source sentence followed by ``beam_size``
    lines of ``<probability>\\t<generated sentence>``, then a blank line.
    """
    beam_result = inferer.infer(input=test_batch, field=["prob", "id"])
    # Each generated candidate in the flat id sequence ends with a -1 marker.
    gen_sen_idx = np.where(beam_result[1] == -1)[0]
    assert len(gen_sen_idx) == len(test_batch) * beam_size, ("%d vs. %d" % (
        len(gen_sen_idx), len(test_batch) * beam_size))

    start_pos, end_pos = 1, 0
    for i, sample in enumerate(test_batch):
        fout.write("%s\n" % (
            " ".join([id_to_text[w] for w in sample[0][1:-1]])
        ))  # skip the start and ending mark when print the source sentence
        for j in xrange(beam_size):
            end_pos = gen_sen_idx[i * beam_size + j]
            fout.write("%s\n" % ("%.4f\t%s" % (beam_result[0][i][j], " ".join(
                id_to_text[w] for w in beam_result[1][start_pos:end_pos - 1]))))
            start_pos = end_pos + 2
        fout.write("\n")
    # BUG FIX: was the bare attribute access `fout.flush` (missing parens),
    # so the output buffer was never actually flushed.
    fout.flush()
@click.command("generate")
@click.option(
    "--model_path",
    default="",
    help="The path of the trained model for generation.")
@click.option(
    "--word_dict_path", required=True, help="The path of word dictionary.")
@click.option(
    "--test_data_path",
    required=True,
    help="The path of input data for generation.")
@click.option(
    "--batch_size",
    default=1,
    help="The number of testing examples in one forward pass in generation.")
@click.option(
    "--beam_size", default=5, help="The beam expansion in beam search.")
@click.option(
    "--use_gpu", default=False, help="Whether to use GPU in generation.")
@click.option(
    "--save_file",
    required=True,
    help="The file path to save the generated results.")
def generate(model_path, word_dict_path, test_data_path, batch_size, beam_size,
             save_file, use_gpu):
    """CLI entry point: load a trained model, decode the test data with beam
    search, and write the generated poems to *save_file*."""
    assert os.path.exists(model_path), "The given model does not exist."
    assert os.path.exists(test_data_path), "The given test data does not exist."

    with gzip.open(model_path, "r") as f:
        parameters = paddle.parameters.Parameters.from_tar(f)

    # Build the id -> token table from the dictionary file (one token per line).
    id_to_text = {}
    assert os.path.exists(
        word_dict_path), "The given word dictionary path does not exist."
    with open(word_dict_path, "r") as f:
        for i, line in enumerate(f):
            id_to_text[i] = line.strip().split("\t")[0]

    paddle.init(use_gpu=use_gpu, trainer_count=1)
    beam_gen = encoder_decoder_network(
        word_count=len(id_to_text),
        emb_dim=512,
        encoder_depth=3,
        encoder_hidden_dim=512,
        decoder_depth=3,
        decoder_hidden_dim=512,
        bos_id=0,
        eos_id=1,
        max_length=9,
        beam_size=beam_size,
        is_generating=True)

    inferer = paddle.inference.Inference(
        output_layer=beam_gen, parameters=parameters)

    # Accumulate samples into batches of batch_size; the tail batch (if any)
    # is flushed after the loop.
    test_batch = []
    with open(save_file, "w") as fout:
        for idx, item in enumerate(
                reader.gen_reader(test_data_path, word_dict_path)()):
            test_batch.append([item])
            if len(test_batch) == batch_size:
                infer_a_batch(inferer, test_batch, beam_size, id_to_text, fout)
                test_batch = []

        if len(test_batch):
            infer_a_batch(inferer, test_batch, beam_size, id_to_text, fout)
            test_batch = []
test_batch = []
if __name__ == "__main__":
generate()
| apache-2.0 | 2,936,414,473,240,789,500 | 32.457944 | 80 | 0.606983 | false | 3.308688 | true | false | false | 0.000559 |
sinkuri256/python-for-android | python3-alpha/python3-src/Lib/shlex.py | 51 | 11100 | """A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
from collections import deque
from io import StringIO
__all__ = ["shlex", "split"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, str):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print('shlex: reading from %s, line %d' \
% (self.instream, self.lineno))
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print("shlex: pushing token " + repr(tok))
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, str):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('shlex: pushing to file %s' % (self.infile,))
else:
print('shlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('shlex: popping to %s, line %d' \
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print("shlex: popping token " + repr(tok))
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print("shlex: token=" + repr(raw))
else:
print("shlex: token=EOF")
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print("shlex: in state", repr(self.state), \
"I see character:", repr(nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print("shlex: I see punctuation in word state")
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print("shlex: raw token=" + repr(result))
else:
print("shlex: raw token=EOF")
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, str) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False, posix=True):
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = shlex()
else:
file = sys.argv[1]
lexer = shlex(open(file), file)
while 1:
tt = lexer.get_token()
if tt:
print("Token: " + repr(tt))
else:
break
| apache-2.0 | -8,029,511,807,110,584,000 | 37.326389 | 76 | 0.4894 | false | 4.538651 | false | false | false | 0.000725 |
nanolearning/edx-platform | lms/djangoapps/certificates/migrations/0001_added_generatedcertificates.py | 188 | 6863 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'GeneratedCertificate'
db.create_table('certificates_generatedcertificate', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('certificate_id', self.gf('django.db.models.fields.CharField')(max_length=32)),
))
db.send_create_signal('certificates', ['GeneratedCertificate'])
def backwards(self, orm):
# Deleting model 'GeneratedCertificate'
db.delete_table('certificates_generatedcertificate')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'certificates.generatedcertificate': {
'Meta': {'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 | -1,453,809,865,872,088,600 | 72.795699 | 182 | 0.565788 | false | 3.793809 | false | false | false | 0.008305 |
ogenstad/ansible | lib/ansible/modules/network/aireos/aireos_command.py | 73 | 6929 | #!/usr/bin/python
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: aireos_command
version_added: "2.4"
author: "James Mighion (@jmighion)"
short_description: Run commands on remote devices running Cisco WLC
description:
- Sends arbitrary commands to an aireos node and returns the results
read from the device. This module includes an
argument that will cause the module to wait for a specific condition
before returning or timing out if the condition is not met.
- This module does not support running commands in configuration mode.
Please use M(aireos_config) to configure WLC devices.
extends_documentation_fragment: aireos
options:
commands:
description:
- List of commands to send to the remote aireos device over the
configured provider. The resulting output from the command
is returned. If the I(wait_for) argument is provided, the
module is not returned until the condition is satisfied or
the number of retries has expired.
required: true
wait_for:
description:
- List of conditions to evaluate against the output of the
command. The task will wait for each condition to be true
before moving forward. If the conditional is not true
within the configured number of retries, the task fails.
See examples.
aliases: ['waitfor']
match:
description:
- The I(match) argument is used in conjunction with the
I(wait_for) argument to specify the match policy. Valid
values are C(all) or C(any). If the value is set to C(all)
then all conditionals in the wait_for must be satisfied. If
the value is set to C(any) then only one of the values must be
satisfied.
default: all
choices: ['any', 'all']
retries:
description:
- Specifies the number of retries a command should by tried
before it is considered failed. The command is run on the
target device every retry and evaluated against the
I(wait_for) conditions.
default: 10
interval:
description:
- Configures the interval in seconds to wait between retries
of the command. If the command does not pass the specified
conditions, the interval indicates how long to wait before
trying the command again.
default: 1
"""
EXAMPLES = """
tasks:
- name: run show sysinfo on remote devices
aireos_command:
commands: show sysinfo
- name: run show sysinfo and check to see if output contains Cisco Controller
aireos_command:
commands: show sysinfo
wait_for: result[0] contains 'Cisco Controller'
- name: run multiple commands on remote nodes
aireos_command:
commands:
- show sysinfo
- show interface summary
- name: run multiple commands and evaluate the output
aireos_command:
commands:
- show sysinfo
- show interface summary
wait_for:
- result[0] contains Cisco Controller
- result[1] contains Loopback0
"""
RETURN = """
stdout:
description: The set of responses from the commands
returned: always apart from low level errors (such as action plugin)
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always apart from low level errors (such as action plugin)
type: list
sample: [['...', '...'], ['...'], ['...']]
failed_conditions:
description: The list of conditionals that have failed
returned: failed
type: list
sample: ['...', '...']
"""
import time
from ansible.module_utils.network.aireos.aireos import run_commands
from ansible.module_utils.network.aireos.aireos import aireos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.network.common.parsing import Conditional
from ansible.module_utils.six import string_types
def to_lines(stdout):
for item in stdout:
if isinstance(item, string_types):
item = str(item).split('\n')
yield item
def parse_commands(module, warnings):
command = ComplexList(dict(
command=dict(key=True),
prompt=dict(),
answer=dict()
), module)
commands = command(module.params['commands'])
for index, item in enumerate(commands):
if module.check_mode and not item['command'].startswith('show'):
warnings.append(
'only show commands are supported when using check mode, not '
'executing `%s`' % item['command']
)
elif item['command'].startswith('conf'):
module.fail_json(
msg='aireos_command does not support running config mode '
'commands. Please use aireos_config instead'
)
return commands
def main():
"""main entry point for module execution
"""
argument_spec = dict(
commands=dict(type='list', required=True),
wait_for=dict(type='list', aliases=['waitfor']),
match=dict(default='all', choices=['all', 'any']),
retries=dict(default=10, type='int'),
interval=dict(default=1, type='int')
)
argument_spec.update(aireos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
warnings = list()
check_args(module, warnings)
commands = parse_commands(module, warnings)
result['warnings'] = warnings
wait_for = module.params['wait_for'] or list()
conditionals = [Conditional(c) for c in wait_for]
retries = module.params['retries']
interval = module.params['interval']
match = module.params['match']
while retries > 0:
responses = run_commands(module, commands)
for item in list(conditionals):
if item(responses):
if match == 'any':
conditionals = list()
break
conditionals.remove(item)
if not conditionals:
break
time.sleep(interval)
retries -= 1
if conditionals:
failed_conditions = [item.raw for item in conditionals]
msg = 'One or more conditional statements have not been satisfied'
module.fail_json(msg=msg, failed_conditions=failed_conditions)
result.update({
'changed': False,
'stdout': responses,
'stdout_lines': list(to_lines(responses))
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,772,420,983,546,892,000 | 31.078704 | 92 | 0.649589 | false | 4.295722 | true | false | false | 0.001299 |
mbalasso/mynumpy | numpy/f2py/crackfortran.py | 1 | 117075 | #!/usr/bin/env python
"""
crackfortran --- read fortran (77,90) code and extract declaration information.
Usage is explained in the comment block below.
Copyright 1999-2004 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/09/27 07:13:49 $
Pearu Peterson
"""
__version__ = "$Revision: 1.177 $"[10:-1]
import platform
import __version__
f2py_version = __version__.version
"""
Usage of crackfortran:
======================
Command line keys: -quiet,-verbose,-fix,-f77,-f90,-show,-h <pyffilename>
-m <module name for f77 routines>,--ignore-contains
Functions: crackfortran, crack2fortran
The following Fortran statements/constructions are supported
(or will be if needed):
block data,byte,call,character,common,complex,contains,data,
dimension,double complex,double precision,end,external,function,
implicit,integer,intent,interface,intrinsic,
logical,module,optional,parameter,private,public,
program,real,(sequence?),subroutine,type,use,virtual,
include,pythonmodule
Note: 'virtual' is mapped to 'dimension'.
Note: 'implicit integer (z) static (z)' is 'implicit static (z)' (this is a minor bug).
Note: code after 'contains' will be ignored until its scope ends.
Note: 'common' statement is extended: dimensions are moved to variable definitions
Note: f2py directive: <commentchar>f2py<line> is read as <line>
Note: pythonmodule is introduced to represent Python module
Usage:
`postlist=crackfortran(files,funcs)`
`postlist` contains declaration information read from the list of files `files`.
`crack2fortran(postlist)` returns a fortran code to be saved to pyf-file
`postlist` has the following structure:
*** it is a list of dictionaries containing `blocks':
B = {'block','body','vars','parent_block'[,'name','prefix','args','result',
'implicit','externals','interfaced','common','sortvars',
'commonvars','note']}
B['block'] = 'interface' | 'function' | 'subroutine' | 'module' |
'program' | 'block data' | 'type' | 'pythonmodule'
B['body'] --- list containing `subblocks' with the same structure as `blocks'
B['parent_block'] --- dictionary of a parent block:
C['body'][<index>]['parent_block'] is C
B['vars'] --- dictionary of variable definitions
B['sortvars'] --- dictionary of variable definitions sorted by dependence (independent first)
B['name'] --- name of the block (not if B['block']=='interface')
B['prefix'] --- prefix string (only if B['block']=='function')
B['args'] --- list of argument names if B['block']== 'function' | 'subroutine'
B['result'] --- name of the return value (only if B['block']=='function')
B['implicit'] --- dictionary {'a':<variable definition>,'b':...} | None
B['externals'] --- list of variables being external
B['interfaced'] --- list of variables being external and defined
B['common'] --- dictionary of common blocks (list of objects)
B['commonvars'] --- list of variables used in common blocks (dimensions are moved to variable definitions)
B['from'] --- string showing the 'parents' of the current block
B['use'] --- dictionary of modules used in current block:
{<modulename>:{['only':<0|1>],['map':{<local_name1>:<use_name1>,...}]}}
B['note'] --- list of LaTeX comments on the block
B['f2pyenhancements'] --- optional dictionary
{'threadsafe':'','fortranname':<name>,
'callstatement':<C-expr>|<multi-line block>,
'callprotoargument':<C-expr-list>,
'usercode':<multi-line block>|<list of multi-line blocks>,
'pymethoddef:<multi-line block>'
}
B['entry'] --- dictionary {entryname:argslist,..}
B['varnames'] --- list of variable names given in the order of reading the
Fortran code, useful for derived types.
B['saved_interface'] --- a string of scanned routine signature, defines explicit interface
*** Variable definition is a dictionary
D = B['vars'][<variable name>] =
{'typespec'[,'attrspec','kindselector','charselector','=','typename']}
D['typespec'] = 'byte' | 'character' | 'complex' | 'double complex' |
'double precision' | 'integer' | 'logical' | 'real' | 'type'
D['attrspec'] --- list of attributes (e.g. 'dimension(<arrayspec>)',
'external','intent(in|out|inout|hide|c|callback|cache|aligned4|aligned8|aligned16)',
'optional','required', etc)
K = D['kindselector'] = {['*','kind']} (only if D['typespec'] =
'complex' | 'integer' | 'logical' | 'real' )
C = D['charselector'] = {['*','len','kind']}
(only if D['typespec']=='character')
D['='] --- initialization expression string
D['typename'] --- name of the type if D['typespec']=='type'
D['dimension'] --- list of dimension bounds
D['intent'] --- list of intent specifications
D['depend'] --- list of variable names on which current variable depends on
D['check'] --- list of C-expressions; if C-expr returns zero, exception is raised
D['note'] --- list of LaTeX comments on the variable
*** Meaning of kind/char selectors (few examples):
  D['typespec']*K['*']
D['typespec'](kind=K['kind'])
character*C['*']
character(len=C['len'],kind=C['kind'])
(see also fortran type declaration statement formats below)
Fortran 90 type declaration statement format (F77 is subset of F90)
====================================================================
(Main source: IBM XL Fortran 5.1 Language Reference Manual)
type declaration = <typespec> [[<attrspec>]::] <entitydecl>
<typespec> = byte |
character[<charselector>] |
complex[<kindselector>] |
double complex |
double precision |
integer[<kindselector>] |
logical[<kindselector>] |
real[<kindselector>] |
type(<typename>)
<charselector> = * <charlen> |
([len=]<len>[,[kind=]<kind>]) |
(kind=<kind>[,len=<len>])
<kindselector> = * <intlen> |
([kind=]<kind>)
<attrspec> = comma separated list of attributes.
Only the following attributes are used in
building up the interface:
external
(parameter --- affects '=' key)
optional
intent
Other attributes are ignored.
<intentspec> = in | out | inout
<arrayspec> = comma separated list of dimension bounds.
<entitydecl> = <name> [[*<charlen>][(<arrayspec>)] | [(<arrayspec>)]*<charlen>]
[/<init_expr>/ | =<init_expr>] [,<entitydecl>]
In addition, the following attributes are used: check,depend,note
TODO:
* Apply 'parameter' attribute (e.g. 'integer parameter :: i=2' 'real x(i)'
-> 'real x(2)')
The above may be solved by creating appropriate preprocessor program, for example.
"""
#
import sys
import string
import fileinput
import re
import pprint
import os
import copy
from auxfuncs import *
# Global flags:
strictf77=1 # Ignore `!' comments unless line[0]=='!'
sourcecodeform='fix' # 'fix','free'
quiet=0 # Be verbose if 0 (Obsolete: not used any more)
verbose=1 # Be quiet if 0, extra verbose if > 1.
tabchar=4*' '  # replacement text for a tab character (presumably used when expanding tabs -- TODO confirm)
pyffilename=''  # output signature-file name, set by the '-h <pyffilename>' command-line key
f77modulename=''  # Python module name for F77 routines, set by the '-m' command-line key
skipemptyends=0 # for old F77 programs without 'program' statement
ignorecontains=1  # if true, code after a 'contains' statement is ignored until its scope ends
dolowercase=1  # lower-case the source while reading (see localdolowercase in readfortrancode)
debug=[]  # list of enabled debugging topics; empty means no debug output
## do_analyze = 1
###### global variables
## use reload(crackfortran) to reset these variables
# Mutable parser state shared by the crack* helpers; reset only via reload().
groupcounter=0  # NOTE(review): looks like the nesting depth of the block being read -- confirm
grouplist={groupcounter:[]}  # per-depth lists of collected block dictionaries -- TODO confirm
neededmodule=-1
expectbegin=1  # presumably true while waiting for the first begin statement -- confirm
skipblocksuntil=-1
usermodules=[]
f90modulevars={}  # presumably maps F90 module names to their variable dicts -- confirm
gotnextfile=1  # set by readfortrancode when it starts reading a new input file
filepositiontext=''  # "current position" prefix written before messages (see outmess)
currentfilename=''  # name of the file currently being read (set in readfortrancode)
skipfunctions=[]
skipfuncs=[]
onlyfuncs=[]
include_paths=[]  # presumably directories searched for Fortran include files -- confirm
previous_context = None
###### Some helper functions
def show(o, f=0):
    """Pretty-print object *o* to stdout (*f* is accepted but unused)."""
    pprint.pprint(o)
# Writer for error/warning text; bound to stderr (used by rmbadname1 & co.).
errmess=sys.stderr.write
def outmess(line, flag=1):
    """Write an informational message *line* to stdout.

    Nothing is written unless the module-level `verbose` flag is set and the
    `quiet` flag is unset.  When *flag* is true, the message is prefixed with
    the current file-position text (`filepositiontext`).
    """
    if not verbose:
        return
    if quiet:
        return
    if flag:
        sys.stdout.write(filepositiontext)
    sys.stdout.write(line)
# Bound the size of the re module's internal pattern cache (this module
# compiles and matches a large number of patterns) -- TODO confirm rationale.
re._MAXCACHE=50
# Fortran's default implicit typing: names starting with i-n are integer,
# everything else (including '$' and '_') is real.
defaultimplicitrules={}
for c in "abcdefghopqrstuvwxyz$_": defaultimplicitrules[c]={'typespec':'real'}
for c in "ijklmn": defaultimplicitrules[c]={'typespec':'integer'}
del c
# Identifiers that must not appear verbatim in the generated C wrapper code
# (C/C++ keywords plus names used internally by f2py).  Each is aliased to
# '<name>_bn'; invbadnames maps the alias back to the original name.
badnames={}
invbadnames={}
for n in ['int','double','float','char','short','long','void','case','while',
          'return','signed','unsigned','if','for','typedef','sizeof','union',
          'struct','static','register','new','break','do','goto','switch',
          'continue','else','inline','extern','delete','const','auto',
          'len','rank','shape','index','slen','size','_i',
          'max', 'min',
          'flen','fshape',
          'string','complex_double','float_double','stdin','stderr','stdout',
          'type','default']:
    badnames[n]=n+'_bn'
    invbadnames[n+'_bn']=n
def rmbadname1(name):
    """Return a C-safe alias for *name* if it is a reserved/bad name.

    Names listed in the module-level `badnames` table are replaced with
    their '<name>_bn' alias (with a notice on stderr); other names are
    returned unchanged.
    """
    try:
        alias = badnames[name]
    except KeyError:
        return name
    errmess('rmbadname1: Replacing "%s" with "%s".\n'%(name, alias))
    return alias
def rmbadname(names):
    """Apply rmbadname1 to every name in *names*, returning a list."""
    return [rmbadname1(name) for name in names]
def undo_rmbadname1(name):
    """Inverse of rmbadname1: map a '<name>_bn' alias back to its original.

    Names not found in the module-level `invbadnames` table are returned
    unchanged; replacements are reported on stderr.
    """
    if name not in invbadnames:
        return name
    original = invbadnames[name]
    errmess('undo_rmbadname1: Replacing "%s" with "%s".\n'\
            %(name, original))
    return original
def undo_rmbadname(names):
    """Apply undo_rmbadname1 to every name in *names*, returning a list."""
    return [undo_rmbadname1(name) for name in names]
def getextension(name):
    """Return the file extension of *name* without the leading dot.

    Returns the empty string when *name* has no dot, or when the text after
    the last dot contains a path separator ('/' or '\\'), i.e. the dot
    belongs to a directory component rather than the file name.
    """
    stem, dot, ext = name.rpartition('.')
    if not dot:
        return ''
    if '\\' in ext or '/' in ext:
        return ''
    return ext
# True for filenames with a classic fixed-form Fortran extension.
is_f_file = re.compile(r'.*[.](for|ftn|f77|f)\Z',re.I).match
# Detect Emacs-style mode lines ('-*- fortran -*-', '-*- f90 -*-',
# '-*- fix -*-') that explicitly declare the source form.
_has_f_header = re.compile(r'-[*]-\s*fortran\s*-[*]-',re.I).search
_has_f90_header = re.compile(r'-[*]-\s*f90\s*-[*]-',re.I).search
_has_fix_header = re.compile(r'-[*]-\s*fix\s*-[*]-',re.I).search
# Matched against the first 5 columns of a line: a first character that is
# not a fixed-form comment marker ('c'/'*'), followed by a non-digit,
# non-space token -- a hint that the line is free-form code.
_free_f90_start = re.compile(r'[^c*]\s*[^\s\d\t]',re.I).match
def is_free_format(file):
    """Check if `file` contains free-format Fortran source.

    Fortran 90 allows both fixed and free format; fixed format is assumed
    unless hints of free format are found in the first ``n`` non-comment
    lines: an Emacs ``-*- f90 -*-`` header, a statement starting before
    column 7, or a trailing continuation ``&``.  Returns 1 for free format
    and 0 otherwise.
    """
    # f90 allows both fixed and free format, assuming fixed unless
    # signs of free format are detected.
    result = 0
    # 'with' guarantees the handle is closed even if a read raises
    # (the plain open()/close() pair leaked it on exceptions).
    with open(file, 'r') as f:
        line = f.readline()
        n = 15  # the number of non-comment lines to scan for hints
        if _has_f_header(line):
            n = 0
        elif _has_f90_header(line):
            n = 0
            result = 1
        while n > 0 and line:
            if line[0] != '!' and line.strip():
                n -= 1
                if (line[0] != '\t' and _free_f90_start(line[:5])) \
                   or line[-2:-1] == '&':
                    result = 1
                    break
            line = f.readline()
    return result
####### Read fortran (77,90) code
def readfortrancode(ffile,dowithline=show,istop=1):
    """
    Read fortran codes from files and
     1) Get rid of comments, line continuations, and empty lines; lower cases.
     2) Call dowithline(line) on every line.
     3) Recursively call itself when statement \"include '<filename>'\" is met.

    ffile is a file name or a list of file names; dowithline is called with
    each cleaned statement; istop is nonzero for the top-level call (for
    nested includes the parser's global state is saved and restored).
    """
    global gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
         beginpattern,quiet,verbose,dolowercase,include_paths
    if not istop:
        # Nested include: remember the including file's parser state so it
        # can be restored when this file is finished.
        saveglobals=gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
               beginpattern,quiet,verbose,dolowercase
    if ffile==[]: return
    localdolowercase = dolowercase
    cont=0
    finalline=''
    ll=''
    # '!' starts a comment only when it is not inside a quoted string.
    commentline=re.compile(r'(?P<line>([^"]*["][^"]*["][^"!]*|[^\']*\'[^\']*\'[^\'!]*|[^!\'"]*))!{1}(?P<rest>.*)')
    includeline=re.compile(r'\s*include\s*(\'|")(?P<name>[^\'"]*)(\'|")',re.I)
    cont1=re.compile(r'(?P<line>.*)&\s*\Z')
    cont2=re.compile(r'(\s*&|)(?P<line>.*)')
    mline_mark = re.compile(r".*?'''")
    if istop: dowithline('',-1)
    ll,l1='',''
    spacedigits=[' ']+map(str,range(10))
    filepositiontext=''
    fin=fileinput.FileInput(ffile)
    # Main loop: one raw source line per iteration; statements are
    # accumulated in ll until a non-continuation line flushes them.
    while 1:
        l=fin.readline()
        if not l: break
        if fin.isfirstline():
            # Starting a new file: detect its source form.
            filepositiontext=''
            currentfilename=fin.filename()
            gotnextfile=1
            l1=l
            strictf77=0
            sourcecodeform='fix'
            ext = os.path.splitext(currentfilename)[1]
            if is_f_file(currentfilename) and \
                   not (_has_f90_header(l) or _has_fix_header(l)):
                strictf77=1
            elif is_free_format(currentfilename) and not _has_fix_header(l):
                sourcecodeform='free'
            if strictf77: beginpattern=beginpattern77
            else: beginpattern=beginpattern90
            outmess('\tReading file %s (format:%s%s)\n'\
                    %(`currentfilename`,sourcecodeform,
                      strictf77 and ',strict' or ''))

        l=l.expandtabs().replace('\xa0',' ')
        while not l=='': # Get rid of newline characters
            if l[-1] not in "\n\r\f": break
            l=l[:-1]
        if not strictf77:
            r=commentline.match(l)
            if r:
                l=r.group('line')+' ' # Strip comments starting with `!'
                rl=r.group('rest')
                if rl[:4].lower()=='f2py': # f2py directive
                    l = l + 4*' '
                    r=commentline.match(rl[4:])
                    if r: l=l+r.group('line')
                    else: l = l + rl[4:]
        if l.strip()=='': # Skip empty line
            cont=0
            continue
        if sourcecodeform=='fix':
            if l[0] in ['*','c','!','C','#']:
                if l[1:5].lower()=='f2py': # f2py directive
                    l='     '+l[5:]
                else: # Skip comment line
                    cont=0
                    continue
            elif strictf77:
                if len(l)>72: l=l[:72]
            if not (l[0] in spacedigits):
                raise Exception('readfortrancode: Found non-(space,digit) char '
                                'in the first column.\n\tAre you sure that '
                                'this code is in fix form?\n\tline=%s' % `l`)

            if (not cont or strictf77) and (len(l)>5 and not l[5]==' '):
                # Continuation of a previous line
                ll=ll+l[6:]
                finalline=''
                origfinalline=''
            else:
                if not strictf77:
                    # F90 continuation
                    r=cont1.match(l)
                    if r: l=r.group('line') # Continuation follows ..
                    if cont:
                        ll=ll+cont2.match(l).group('line')
                        finalline=''
                        origfinalline=''
                    else:
                        # First line of a new statement: flush the previous one.
                        l='     '+l[5:] # clean up line beginning from possible digits.
                        if localdolowercase: finalline=ll.lower()
                        else: finalline=ll
                        origfinalline=ll
                        ll=l
                    cont=(r is not None)
                else:
                    l='     '+l[5:] # clean up line beginning from possible digits.
                    if localdolowercase: finalline=ll.lower()
                    else: finalline=ll
                    origfinalline =ll
                    ll=l

        elif sourcecodeform=='free':
            if not cont and ext=='.pyf' and mline_mark.match(l):
                # .pyf multiline block ('''...'''): swallow it verbatim.
                l = l + '\n'
                while 1:
                    lc = fin.readline()
                    if not lc:
                        errmess('Unexpected end of file when reading multiline\n')
                        break
                    l = l + lc
                    if mline_mark.match(lc):
                        break
                l = l.rstrip()
            r=cont1.match(l)
            if r: l=r.group('line') # Continuation follows ..
            if cont:
                ll=ll+cont2.match(l).group('line')
                finalline=''
                origfinalline=''
            else:
                if localdolowercase: finalline=ll.lower()
                else: finalline=ll
                origfinalline =ll
                ll=l
            cont=(r is not None)
        else:
            raise ValueError("Flag sourcecodeform must be either 'fix' or 'free': %s"%`sourcecodeform`)
        filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1)
        m=includeline.match(origfinalline)
        if m:
            # include statement: recurse into the included file, searching
            # the current file's directory plus include_paths.
            fn=m.group('name')
            if os.path.isfile(fn):
                readfortrancode(fn,dowithline=dowithline,istop=0)
            else:
                include_dirs = [os.path.dirname(currentfilename)] + include_paths
                foundfile = 0
                for inc_dir in include_dirs:
                    fn1 = os.path.join(inc_dir,fn)
                    if os.path.isfile(fn1):
                        foundfile = 1
                        readfortrancode(fn1,dowithline=dowithline,istop=0)
                        break
                if not foundfile:
                    outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(`fn`, os.pathsep.join(include_dirs)))
        else:
            dowithline(finalline)
        l1=ll
    # End of input: flush the last accumulated statement (same handling as
    # inside the loop).
    if localdolowercase:
        finalline=ll.lower()
    else: finalline=ll
    origfinalline = ll
    filepositiontext='Line #%d in %s:"%s"\n\t' % (fin.filelineno()-1,currentfilename,l1)
    m=includeline.match(origfinalline)
    if m:
        fn=m.group('name')
        if os.path.isfile(fn):
            readfortrancode(fn,dowithline=dowithline,istop=0)
        else:
            include_dirs = [os.path.dirname(currentfilename)] + include_paths
            foundfile = 0
            for inc_dir in include_dirs:
                fn1 = os.path.join(inc_dir,fn)
                if os.path.isfile(fn1):
                    foundfile = 1
                    readfortrancode(fn1,dowithline=dowithline,istop=0)
                    break
            if not foundfile:
                outmess('readfortrancode: could not find include file %s in %s. Ignoring.\n'%(`fn`, os.pathsep.join(include_dirs)))
    else:
        dowithline(finalline)
    filepositiontext=''
    fin.close()
    if istop: dowithline('',1)
    else:
        # Restore the parser state of the including file.
        gotnextfile,filepositiontext,currentfilename,sourcecodeform,strictf77,\
             beginpattern,quiet,verbose,dolowercase=saveglobals
########### Crack line
# Template for statement-matching regexes; the three %s slots receive the
# 'before' pattern and (twice) the statement keyword(s).
beforethisafter=r'\s*(?P<before>%s(?=\s*(\b(%s)\b)))'+ \
                r'\s*(?P<this>(\b(%s)\b))'+ \
                r'\s*(?P<after>%s)\s*\Z'
##
fortrantypes='character|logical|integer|real|complex|double\s*(precision\s*(complex|)|complex)|type(?=\s*\([\w\s,=(*)]*\))|byte'
typespattern=re.compile(beforethisafter%('',fortrantypes,fortrantypes,'.*'),re.I),'type'
typespattern4implicit=re.compile(beforethisafter%('',fortrantypes+'|static|automatic|undefined',fortrantypes+'|static|automatic|undefined','.*'),re.I)
#
functionpattern=re.compile(beforethisafter%('([a-z]+[\w\s(=*+-/)]*?|)','function','function','.*'),re.I),'begin'
subroutinepattern=re.compile(beforethisafter%('[a-z\s]*?','subroutine','subroutine','.*'),re.I),'begin'
#modulepattern=re.compile(beforethisafter%('[a-z\s]*?','module','module','.*'),re.I),'begin'
#
groupbegins77=r'program|block\s*data'
beginpattern77=re.compile(beforethisafter%('',groupbegins77,groupbegins77,'.*'),re.I),'begin'
groupbegins90=groupbegins77+r'|module(?!\s*procedure)|python\s*module|interface|type(?!\s*\()'
beginpattern90=re.compile(beforethisafter%('',groupbegins90,groupbegins90,'.*'),re.I),'begin'
groupends=r'end|endprogram|endblockdata|endmodule|endpythonmodule|endinterface'
endpattern=re.compile(beforethisafter%('',groupends,groupends,'[\w\s]*'),re.I),'end'
#endifs='end\s*(if|do|where|select|while|forall)'
endifs='(end\s*(if|do|where|select|while|forall))|(module\s*procedure)'
endifpattern=re.compile(beforethisafter%('[\w]*?',endifs,endifs,'[\w\s]*'),re.I),'endif'
#
# Each pattern below is a (compiled-regex, case-name) pair; crackline
# dispatches to analyzeline with the case-name of the first match.
implicitpattern=re.compile(beforethisafter%('','implicit','implicit','.*'),re.I),'implicit'
dimensionpattern=re.compile(beforethisafter%('','dimension|virtual','dimension|virtual','.*'),re.I),'dimension'
externalpattern=re.compile(beforethisafter%('','external','external','.*'),re.I),'external'
optionalpattern=re.compile(beforethisafter%('','optional','optional','.*'),re.I),'optional'
requiredpattern=re.compile(beforethisafter%('','required','required','.*'),re.I),'required'
publicpattern=re.compile(beforethisafter%('','public','public','.*'),re.I),'public'
privatepattern=re.compile(beforethisafter%('','private','private','.*'),re.I),'private'
intrisicpattern=re.compile(beforethisafter%('','intrisic','intrisic','.*'),re.I),'intrisic'
intentpattern=re.compile(beforethisafter%('','intent|depend|note|check','intent|depend|note|check','\s*\(.*?\).*'),re.I),'intent'
parameterpattern=re.compile(beforethisafter%('','parameter','parameter','\s*\(.*'),re.I),'parameter'
datapattern=re.compile(beforethisafter%('','data','data','.*'),re.I),'data'
callpattern=re.compile(beforethisafter%('','call','call','.*'),re.I),'call'
entrypattern=re.compile(beforethisafter%('','entry','entry','.*'),re.I),'entry'
callfunpattern=re.compile(beforethisafter%('','callfun','callfun','.*'),re.I),'callfun'
commonpattern=re.compile(beforethisafter%('','common','common','.*'),re.I),'common'
usepattern=re.compile(beforethisafter%('','use','use','.*'),re.I),'use'
containspattern=re.compile(beforethisafter%('','contains','contains',''),re.I),'contains'
formatpattern=re.compile(beforethisafter%('','format','format','.*'),re.I),'format'
## Non-fortran and f2py-specific statements
f2pyenhancementspattern=re.compile(beforethisafter%('','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','threadsafe|fortranname|callstatement|callprotoargument|usercode|pymethoddef','.*'),re.I|re.S),'f2pyenhancements'
multilinepattern = re.compile(r"\s*(?P<before>''')(?P<this>.*?)(?P<after>''')\s*\Z",re.S),'multiline'
##
def _simplifyargs(argsline):
    """Flatten an argument list: any '(', ')' or ',' appearing inside an
    individual argument is replaced by '_' so the result is a plain
    comma-separated list of identifiers."""
    simplified = []
    for arg in markoutercomma(argsline).split('@,@'):
        for ch in '(),':
            arg = arg.replace(ch, '_')
        simplified.append(arg)
    return ','.join(simplified)
# Matches '<name> = ...'; used to pick up the result variable when an
# external function call is rewritten as a 'callfun' statement.
crackline_re_1 = re.compile(r'\s*(?P<result>\b[a-z]+[\w]*\b)\s*[=].*',re.I)
def crackline(line,reset=0):
    """
    reset=-1 --- initialize
    reset=0 --- crack the line
    reset=1 --- final check if mismatch of blocks occured
    Cracked data is saved in grouplist[0].

    Matches *line* against the statement-pattern table and dispatches to
    analyzeline; maintains the global group nesting (groupcounter,
    groupcache, grouplist).
    """
    global beginpattern,groupcounter,groupname,groupcache,grouplist,gotnextfile,\
         filepositiontext,currentfilename,neededmodule,expectbegin,skipblocksuntil,\
         skipemptyends,previous_context
    # Split multi-statement lines on ';' (but not inside f2py enhancement or
    # multiline blocks, where ';' is ordinary text).
    if ';' in line and not (f2pyenhancementspattern[0].match(line) or
                            multilinepattern[0].match(line)):
        for l in line.split(';'):
            assert reset==0,`reset` # XXX: non-zero reset values need testing
            crackline(l,reset)
        return
    if reset<0:
        # (Re)initialize the global group bookkeeping.
        groupcounter=0
        groupname={groupcounter:''}
        groupcache={groupcounter:{}}
        grouplist={groupcounter:[]}
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['block']=''
        groupcache[groupcounter]['name']=''
        neededmodule=-1
        skipblocksuntil=-1
        return
    if reset>0:
        # End of input: close any blocks still left open.
        fl=0
        if f77modulename and neededmodule==groupcounter: fl=2
        while groupcounter>fl:
            outmess('crackline: groupcounter=%s groupname=%s\n'%(`groupcounter`,`groupname`))
            outmess('crackline: Mismatch of blocks encountered. Trying to fix it by assuming "end" statement.\n')
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1
        if f77modulename and neededmodule==groupcounter:
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end interface
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end module
            neededmodule=-1
        return
    if line=='': return
    # Find the first statement pattern that matches this line.
    flag=0
    for pat in [dimensionpattern,externalpattern,intentpattern,optionalpattern,
                requiredpattern,
                parameterpattern,datapattern,publicpattern,privatepattern,
                intrisicpattern,
                endifpattern,endpattern,
                formatpattern,
                beginpattern,functionpattern,subroutinepattern,
                implicitpattern,typespattern,commonpattern,
                callpattern,usepattern,containspattern,
                entrypattern,
                f2pyenhancementspattern,
                multilinepattern
                ]:
        m = pat[0].match(line)
        if m:
            break
        flag=flag+1
    if not m:
        # No statement pattern matched: the line may be a call to a known
        # external function; if so, rewrite it as a 'callfun' statement.
        re_1 = crackline_re_1
        if 0<=skipblocksuntil<=groupcounter:return
        if 'externals' in groupcache[groupcounter]:
            for name in groupcache[groupcounter]['externals']:
                if name in invbadnames:
                    name=invbadnames[name]
                if 'interfaced' in groupcache[groupcounter] and name in groupcache[groupcounter]['interfaced']:
                    continue
                m1=re.match(r'(?P<before>[^"]*)\b%s\b\s*@\(@(?P<args>[^@]*)@\)@.*\Z'%name,markouterparen(line),re.I)
                if m1:
                    m2 = re_1.match(m1.group('before'))
                    a = _simplifyargs(m1.group('args'))
                    if m2:
                        line='callfun %s(%s) result (%s)'%(name,a,m2.group('result'))
                    else: line='callfun %s(%s)'%(name,a)
                    m = callfunpattern[0].match(line)
                    if not m:
                        outmess('crackline: could not resolve function call for line=%s.\n'%`line`)
                        return
                    analyzeline(m,'callfun',line)
                    return
        if verbose>1 or (verbose==1 and currentfilename.lower().endswith('.pyf')):
            previous_context = None
            outmess('crackline:%d: No pattern for line\n'%(groupcounter))
        return
    elif pat[1]=='end':
        # Close the current group, sanity-checking the block name.
        if 0<=skipblocksuntil<groupcounter:
            groupcounter=groupcounter-1
            if skipblocksuntil<=groupcounter: return
        if groupcounter<=0:
            raise Exception('crackline: groupcounter(=%s) is nonpositive. '
                            'Check the blocks.' \
                            % (groupcounter))
        m1 = beginpattern[0].match((line))
        if (m1) and (not m1.group('this')==groupname[groupcounter]):
            raise Exception('crackline: End group %s does not match with '
                            'previous Begin group %s\n\t%s' % \
                            (`m1.group('this')`, `groupname[groupcounter]`,
                             filepositiontext)
                            )
        if skipblocksuntil==groupcounter:
            skipblocksuntil=-1
        grouplist[groupcounter-1].append(groupcache[groupcounter])
        grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
        del grouplist[groupcounter]
        groupcounter=groupcounter-1
        if not skipemptyends:
            expectbegin=1
    elif pat[1] == 'begin':
        if 0<=skipblocksuntil<=groupcounter:
            groupcounter=groupcounter+1
            return
        gotnextfile=0
        analyzeline(m,pat[1],line)
        expectbegin=0
    elif pat[1]=='endif':
        pass
    elif pat[1]=='contains':
        if ignorecontains: return
        if 0<=skipblocksuntil<=groupcounter: return
        skipblocksuntil=groupcounter
    else:
        if 0<=skipblocksuntil<=groupcounter:return
        analyzeline(m,pat[1],line)
def markouterparen(line):
    """Replace the outermost-level '(' and ')' in *line* with '@(@' and '@)@'.

    Nested parentheses are left untouched, which lets later passes split on
    the outer grouping only."""
    out = []
    depth = 0
    for ch in line:
        if ch == '(':
            depth += 1
            if depth == 1:
                out.append('@(@')
                continue
        elif ch == ')':
            depth -= 1
            if depth == 0:
                out.append('@)@')
                continue
        out.append(ch)
    return ''.join(out)
def markoutercomma(line,comma=','):
    """Mark occurrences of *comma* at nesting depth zero as '@<comma>@'.

    Parentheses and single-quoted strings raise the nesting depth, so commas
    inside them are left untouched.  Asserts that all openers were closed.
    """
    out = ''
    depth = 0
    closer = ''
    for ch in line:
        if (not closer or closer == ')') and ch == '(':
            depth += 1
            closer = ')'
        elif not closer and ch == '\'' and (not out or out[-1] != '\\'):
            depth += 1
            closer = '\''
        elif ch == closer:
            depth -= 1
            if depth == 0:
                closer = ''
        elif ch == comma and depth == 0:
            out = out + '@' + comma + '@'
            continue
        out = out + ch
    assert not depth, repr((depth, line, out, closer))
    return out
def unmarkouterparen(line):
    """Inverse of markouterparen: restore '@(@'/'@)@' to plain parentheses."""
    return line.replace('@(@', '(').replace('@)@', ')')
def appenddecl(decl, decl2, force=1):
    """Merge declaration dict *decl2* into *decl* and return the result.

    With *force* true, scalar entries from decl2 overwrite those in decl;
    attribute and selector entries are merged through the set*spec helpers.
    Unknown keys raise; a few known-but-unsupported keys only warn.
    """
    if not decl:
        decl = {}
    if not decl2:
        return decl
    if decl is decl2:
        return decl
    for key in decl2.keys():
        if key == 'typespec':
            if force or key not in decl:
                decl[key] = decl2[key]
        elif key == 'attrspec':
            for attr in decl2[key]:
                decl = setattrspec(decl, attr, force)
        elif key == 'kindselector':
            decl = setkindselector(decl, decl2[key], force)
        elif key == 'charselector':
            decl = setcharselector(decl, decl2[key], force)
        elif key in ['=', 'typename']:
            if force or key not in decl:
                decl[key] = decl2[key]
        elif key == 'note':
            pass
        elif key in ['intent', 'check', 'dimension', 'optional', 'required']:
            errmess('appenddecl: "%s" not implemented.\n' % key)
        else:
            raise Exception('appenddecl: Unknown variable definition key:' + \
                            str(key))
    return decl
# Optional kind/char selector (or '*<len>') in front of an entity declaration.
selectpattern=re.compile(r'\s*(?P<this>(@\(@.*?@\)@|[*][\d*]+|[*]\s*@\(@.*?@\)@|))(?P<after>.*)\Z',re.I)
# Routine heading: name, optional argument list, optional result/bind clauses.
nameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*(@\(@\s*(?P<args>[\w\s,]*)\s*@\)@|)\s*((result(\s*@\(@\s*(?P<result>\b[\w$]+\b)\s*@\)@|))|(bind\s*@\(@\s*(?P<bind>.*)\s*@\)@))*\s*\Z',re.I)
# Call-style heading: name followed by a mandatory argument list.
callnameargspattern=re.compile(r'\s*(?P<name>\b[\w$]+\b)\s*@\(@\s*(?P<args>.*)\s*@\)@\s*\Z',re.I)
# Fortran real literals using the 'd' (double) and 'e' exponent markers.
real16pattern = re.compile(r'([-+]?(?:\d+(?:\.\d*)?|\d*\.\d+))[dD]((?:[-+]?\d+)?)')
real8pattern = re.compile(r'([-+]?((?:\d+(?:\.\d*)?|\d*\.\d+))[eE]((?:[-+]?\d+)?)|(\d+\.\d*))')

# Detects the f2py-specific intent(callback) attribute.
_intentcallbackpattern = re.compile(r'intent\s*\(.*?\bcallback\b',re.I)
def _is_intent_callback(vdecl):
    """Return 1 if the declaration carries an intent(callback) attribute,
    else 0."""
    attrs = vdecl.get('attrspec', [])
    return int(any(_intentcallbackpattern.match(a) for a in attrs))
def _resolvenameargspattern(line):
    """Split a routine heading into (name, args, result, bind).

    Tries the full heading pattern first (optional args, result/bind
    clauses), then the call-style pattern; returns (None, [], None, None)
    when neither matches."""
    marked = markouterparen(line)
    match = nameargspattern.match(marked)
    if match:
        return (match.group('name'), match.group('args'),
                match.group('result'), match.group('bind'))
    match = callnameargspattern.match(marked)
    if match:
        return match.group('name'), match.group('args'), None, None
    return None, [], None, None
def analyzeline(m,case,line):
    """Digest one matched statement into the global group structures.

    m is the regex match produced by crackline, case is the pattern's
    case-name ('begin', 'call', 'type', 'data', ...), and line is the full
    statement text.  Results are accumulated in groupcache/grouplist.
    """
    global groupcounter,groupname,groupcache,grouplist,filepositiontext,\
         currentfilename,f77modulename,neededinterface,neededmodule,expectbegin,\
         gotnextfile,previous_context
    block=m.group('this')
    if case != 'multiline':
        previous_context = None
    if expectbegin and case not in ['begin','call','callfun','type'] \
       and not skipemptyends and groupcounter<1:
        # No group opened yet: wrap stray statements in an implicit program
        # group named after the current file.
        newname=os.path.basename(currentfilename).split('.')[0]
        outmess('analyzeline: no group yet. Creating program group with name "%s".\n'%newname)
        gotnextfile=0
        groupcounter=groupcounter+1
        groupname[groupcounter]='program'
        groupcache[groupcounter]={}
        grouplist[groupcounter]=[]
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['block']='program'
        groupcache[groupcounter]['name']=newname
        groupcache[groupcounter]['from']='fromsky'
        expectbegin=0
    if case in ['begin','call','callfun']:
        # Crack line => block,name,args,result
        block = block.lower()
        if re.match(r'block\s*data',block,re.I): block='block data'
        if re.match(r'python\s*module',block,re.I): block='python module'
        name,args,result,bind = _resolvenameargspattern(m.group('after'))
        if name is None:
            if block=='block data':
                name = '_BLOCK_DATA_'
            else:
                name = ''
            if block not in ['interface','block data']:
                outmess('analyzeline: No name/args pattern found for line.\n')
        previous_context = (block,name,groupcounter)
        if args: args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
        else: args=[]
        if '' in args:
            while '' in args:
                args.remove('')
            outmess('analyzeline: argument list is malformed (missing argument).\n')
        # end of crack line => block,name,args,result
        needmodule=0
        needinterface=0
        if case in ['call','callfun']:
            # Calls only matter when the callee is an argument of the
            # current routine and is not already interfaced.
            needinterface=1
            if 'args' not in groupcache[groupcounter]:
                return
            if name not in groupcache[groupcounter]['args']:
                return
            for it in grouplist[groupcounter]:
                if it['name']==name:
                    return
            if name in groupcache[groupcounter]['interfaced']:
                return
            block={'call':'subroutine','callfun':'function'}[case]
        if f77modulename and neededmodule==-1 and groupcounter<=1:
            neededmodule=groupcounter+2
            needmodule=1
            if block != 'interface':
                needinterface=1
        # Create new block(s)
        groupcounter=groupcounter+1
        groupcache[groupcounter]={}
        grouplist[groupcounter]=[]
        if needmodule:
            if verbose>1:
                outmess('analyzeline: Creating module block %s\n'%`f77modulename`,0)
            groupname[groupcounter]='module'
            groupcache[groupcounter]['block']='python module'
            groupcache[groupcounter]['name']=f77modulename
            groupcache[groupcounter]['from']=''
            groupcache[groupcounter]['body']=[]
            groupcache[groupcounter]['externals']=[]
            groupcache[groupcounter]['interfaced']=[]
            groupcache[groupcounter]['vars']={}
            groupcounter=groupcounter+1
            groupcache[groupcounter]={}
            grouplist[groupcounter]=[]
        if needinterface:
            if verbose>1:
                outmess('analyzeline: Creating additional interface block (groupcounter=%s).\n' % (groupcounter),0)
            groupname[groupcounter]='interface'
            groupcache[groupcounter]['block']='interface'
            groupcache[groupcounter]['name']='unknown_interface'
            groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name'])
            groupcache[groupcounter]['body']=[]
            groupcache[groupcounter]['externals']=[]
            groupcache[groupcounter]['interfaced']=[]
            groupcache[groupcounter]['vars']={}
            groupcounter=groupcounter+1
            groupcache[groupcounter]={}
            grouplist[groupcounter]=[]
        groupname[groupcounter]=block
        groupcache[groupcounter]['block']=block
        if not name: name='unknown_'+block
        groupcache[groupcounter]['prefix']=m.group('before')
        groupcache[groupcounter]['name']=rmbadname1(name)
        groupcache[groupcounter]['result']=result
        if groupcounter==1:
            groupcache[groupcounter]['from']=currentfilename
        else:
            if f77modulename and groupcounter==3:
                groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],currentfilename)
            else:
                groupcache[groupcounter]['from']='%s:%s'%(groupcache[groupcounter-1]['from'],groupcache[groupcounter-1]['name'])
        for k in groupcache[groupcounter].keys():
            if not groupcache[groupcounter][k]:
                del groupcache[groupcounter][k]
        groupcache[groupcounter]['args']=args
        groupcache[groupcounter]['body']=[]
        groupcache[groupcounter]['externals']=[]
        groupcache[groupcounter]['interfaced']=[]
        groupcache[groupcounter]['vars']={}
        groupcache[groupcounter]['entry']={}
        # end of creation
        if block=='type':
            groupcache[groupcounter]['varnames'] = []

        if case in ['call','callfun']: # set parents variables
            if name not in groupcache[groupcounter-2]['externals']:
                groupcache[groupcounter-2]['externals'].append(name)
            groupcache[groupcounter]['vars']=copy.deepcopy(groupcache[groupcounter-2]['vars'])
            #try: del groupcache[groupcounter]['vars'][groupcache[groupcounter-2]['name']]
            #except: pass
            try: del groupcache[groupcounter]['vars'][name][groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
            except: pass
        if block in ['function','subroutine']: # set global attributes
            try: groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter-2]['vars'][''])
            except: pass
            if case=='callfun': # return type
                if result and result in groupcache[groupcounter]['vars']:
                    if not name==result:
                        groupcache[groupcounter]['vars'][name]=appenddecl(groupcache[groupcounter]['vars'][name],groupcache[groupcounter]['vars'][result])
            #if groupcounter>1: # name is interfaced
            try: groupcache[groupcounter-2]['interfaced'].append(name)
            except: pass
        if block=='function':
            # A typed function heading also declares the result variable.
            t=typespattern[0].match(m.group('before')+' '+name)
            if t:
                typespec,selector,attr,edecl=cracktypespec0(t.group('this'),t.group('after'))
                updatevars(typespec,selector,attr,edecl)

        if case in ['call','callfun']:
            # A rewritten call opens no real block: close it again at once.
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end routine
            grouplist[groupcounter-1].append(groupcache[groupcounter])
            grouplist[groupcounter-1][-1]['body']=grouplist[groupcounter]
            del grouplist[groupcounter]
            groupcounter=groupcounter-1 # end interface

    elif case=='entry':
        # Alternate entry point: record its name and argument list.
        name,args,result,bind=_resolvenameargspattern(m.group('after'))
        if name is not None:
            if args:
                args=rmbadname([x.strip() for x in markoutercomma(args).split('@,@')])
            else: args=[]
            assert result is None,`result`
            groupcache[groupcounter]['entry'][name] = args
            previous_context = ('entry',name,groupcounter)
    elif case=='type':
        # Type declaration statement: delegate to updatevars.
        typespec,selector,attr,edecl=cracktypespec0(block,m.group('after'))
        last_name = updatevars(typespec,selector,attr,edecl)
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case in ['dimension','intent','optional','required','external','public','private','intrisic']:
        # Attribute statements: attach the attribute to each listed name.
        edecl=groupcache[groupcounter]['vars']
        ll=m.group('after').strip()
        i=ll.find('::')
        if i<0 and case=='intent':
            i=markouterparen(ll).find('@)@')-2
            ll=ll[:i+1]+'::'+ll[i+1:]
            i=ll.find('::')
            if ll[i:]=='::' and 'args' in groupcache[groupcounter]:
                outmess('All arguments will have attribute %s%s\n'%(m.group('this'),ll[:i]))
                ll = ll + ','.join(groupcache[groupcounter]['args'])
        if i<0:i=0;pl=''
        else: pl=ll[:i].strip();ll=ll[i+2:]
        ch = markoutercomma(pl).split('@,@')
        if len(ch)>1:
            pl = ch[0]
            outmess('analyzeline: cannot handle multiple attributes without type specification. Ignoring %r.\n' % (','.join(ch[1:])))
        last_name = None

        for e in [x.strip() for x in markoutercomma(ll).split('@,@')]:
            m1=namepattern.match(e)
            if not m1:
                if case in ['public','private']: k=''
                else:
                    print m.groupdict()
                    outmess('analyzeline: no name pattern found in %s statement for %s. Skipping.\n'%(case,`e`))
                    continue
            else:
                k=rmbadname1(m1.group('name'))
            if k not in edecl:
                edecl[k]={}
            if case=='dimension':
                ap=case+m1.group('after')
            if case=='intent':
                ap=m.group('this')+pl
                if _intentcallbackpattern.match(ap):
                    # intent(callback) arguments are appended to the
                    # argument list if not already there.
                    if k not in groupcache[groupcounter]['args']:
                        if groupcounter>1:
                            if '__user__' not in groupcache[groupcounter-2]['name']:
                                outmess('analyzeline: missing __user__ module (could be nothing)\n')
                            if k!=groupcache[groupcounter]['name']: # fixes ticket 1693
                                outmess('analyzeline: appending intent(callback) %s'\
                                        ' to %s arguments\n' % (k,groupcache[groupcounter]['name']))
                                groupcache[groupcounter]['args'].append(k)
                        else:
                            errmess('analyzeline: intent(callback) %s is ignored' % (k))
                    else:
                        errmess('analyzeline: intent(callback) %s is already'\
                                ' in argument list' % (k))
            if case in ['optional','required','public','external','private','intrisic']:
                ap=case
            if 'attrspec' in edecl[k]:
                edecl[k]['attrspec'].append(ap)
            else:
                edecl[k]['attrspec']=[ap]
            if case=='external':
                if groupcache[groupcounter]['block']=='program':
                    outmess('analyzeline: ignoring program arguments\n')
                    continue
                if k not in groupcache[groupcounter]['args']:
                    #outmess('analyzeline: ignoring external %s (not in arguments list)\n'%(`k`))
                    continue
                if 'externals' not in groupcache[groupcounter]:
                    groupcache[groupcounter]['externals']=[]
                groupcache[groupcounter]['externals'].append(k)
            last_name = k
        groupcache[groupcounter]['vars']=edecl
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case=='parameter':
        # parameter (k=expr,...): evaluate each expression and record it as
        # the variable's '=' initializer.
        edecl=groupcache[groupcounter]['vars']
        ll=m.group('after').strip()[1:-1]
        last_name = None
        for e in markoutercomma(ll).split('@,@'):
            try:
                k,initexpr=[x.strip() for x in e.split('=')]
            except:
                outmess('analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n'%(e,ll));continue
            params = get_parameters(edecl)
            k=rmbadname1(k)
            if k not in edecl:
                edecl[k]={}
            if '=' in edecl[k] and (not edecl[k]['=']==initexpr):
                outmess('analyzeline: Overwriting the value of parameter "%s" ("%s") with "%s".\n'%(k,edecl[k]['='],initexpr))
            t = determineexprtype(initexpr,params)
            if t:
                if t.get('typespec')=='real':
                    # Rewrite Fortran double literals (1.0d0) as Python
                    # floats (1.0e0) before evaluation.
                    tt = list(initexpr)
                    for m in real16pattern.finditer(initexpr):
                        tt[m.start():m.end()] = list(\
                                initexpr[m.start():m.end()].lower().replace('d', 'e'))
                    initexpr = ''.join(tt)
                elif t.get('typespec')=='complex':
                    initexpr = initexpr[1:].lower().replace('d','e').\
                               replace(',','+1j*(')
            try:
                v = eval(initexpr,{},params)
            except (SyntaxError,NameError,TypeError),msg:
                errmess('analyzeline: Failed to evaluate %r. Ignoring: %s\n'\
                        % (initexpr, msg))
                continue
            edecl[k]['='] = repr(v)
            if 'attrspec' in edecl[k]:
                edecl[k]['attrspec'].append('parameter')
            else: edecl[k]['attrspec']=['parameter']
            last_name = k
        groupcache[groupcounter]['vars']=edecl
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case=='implicit':
        # implicit none or implicit <type> (<letter ranges>), ...
        if m.group('after').strip().lower()=='none':
            groupcache[groupcounter]['implicit']=None
        elif m.group('after'):
            if 'implicit' in groupcache[groupcounter]:
                impl=groupcache[groupcounter]['implicit']
            else: impl={}
            if impl is None:
                outmess('analyzeline: Overwriting earlier "implicit none" statement.\n')
                impl={}
            for e in markoutercomma(m.group('after')).split('@,@'):
                decl={}
                m1=re.match(r'\s*(?P<this>.*?)\s*(\(\s*(?P<after>[a-z-, ]+)\s*\)\s*|)\Z',e,re.I)
                if not m1:
                    outmess('analyzeline: could not extract info of implicit statement part "%s"\n'%(e));continue
                m2=typespattern4implicit.match(m1.group('this'))
                if not m2:
                    outmess('analyzeline: could not extract types pattern of implicit statement part "%s"\n'%(e));continue
                typespec,selector,attr,edecl=cracktypespec0(m2.group('this'),m2.group('after'))
                kindselect,charselect,typename=cracktypespec(typespec,selector)
                decl['typespec']=typespec
                decl['kindselector']=kindselect
                decl['charselector']=charselect
                decl['typename']=typename
                for k in decl.keys():
                    if not decl[k]: del decl[k]
                for r in markoutercomma(m1.group('after')).split('@,@'):
                    if '-' in r:
                        try: begc,endc=[x.strip() for x in r.split('-')]
                        except:
                            outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n'%r);continue
                    else: begc=endc=r.strip()
                    if not len(begc)==len(endc)==1:
                        outmess('analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement (2)\n'%r);continue
                    for o in range(ord(begc),ord(endc)+1):
                        impl[chr(o)]=decl
            groupcache[groupcounter]['implicit']=impl
    elif case=='data':
        # data statement: split into (name-list, value-list) pairs at the
        # '/' separators, honoring quotes and parentheses.
        ll=[]
        dl='';il='';f=0;fc=1;inp=0
        for c in m.group('after'):
            if not inp:
                if c=="'": fc=not fc
                if c=='/' and fc: f=f+1;continue
            if c=='(': inp = inp + 1
            elif c==')': inp = inp - 1
            if f==0: dl=dl+c
            elif f==1: il=il+c
            elif f==2:
                dl = dl.strip()
                if dl.startswith(','):
                    dl = dl[1:].strip()
                ll.append([dl,il])
                dl=c;il='';f=0
        if f==2:
            dl = dl.strip()
            if dl.startswith(','):
                dl = dl[1:].strip()
            ll.append([dl,il])
        vars={}
        if 'vars' in groupcache[groupcounter]:
            vars=groupcache[groupcounter]['vars']
        last_name = None
        for l in ll:
            l=[x.strip() for x in l]
            if l[0][0]==',':l[0]=l[0][1:]
            if l[0][0]=='(':
                outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%l[0])
                continue
            #if '(' in l[0]:
            #    #outmess('analyzeline: ignoring this data statement.\n')
            #    continue
            i=0;j=0;llen=len(l[1])
            for v in rmbadname([x.strip() for x in markoutercomma(l[0]).split('@,@')]):
                if v[0]=='(':
                    outmess('analyzeline: implied-DO list "%s" is not supported. Skipping.\n'%v)
                    # XXX: subsequent init expressions may get wrong values.
                    # Ignoring since data statements are irrelevant for wrapping.
                    continue
                fc=0
                while (i<llen) and (fc or not l[1][i]==','):
                    if l[1][i]=="'": fc=not fc
                    i=i+1
                i=i+1
                #v,l[1][j:i-1]=name,initvalue
                if v not in vars:
                    vars[v]={}
                if '=' in vars[v] and not vars[v]['=']==l[1][j:i-1]:
                    outmess('analyzeline: changing init expression of "%s" ("%s") to "%s"\n'%(v,vars[v]['='],l[1][j:i-1]))
                vars[v]['=']=l[1][j:i-1]
                j=i
                last_name = v
        groupcache[groupcounter]['vars']=vars
        if last_name is not None:
            previous_context = ('variable',last_name,groupcounter)
    elif case=='common':
        # common /name/ a,b,... : record members per common-block name.
        line=m.group('after').strip()
        if not line[0]=='/':line='//'+line
        cl=[]
        f=0;bn='';ol=''
        for c in line:
            if c=='/':f=f+1;continue
            if f>=3:
                bn = bn.strip()
                if not bn: bn='_BLNK_'
                cl.append([bn,ol])
                f=f-2;bn='';ol=''
            if f%2: bn=bn+c
            else: ol=ol+c
        bn = bn.strip()
        if not bn: bn='_BLNK_'
        cl.append([bn,ol])
        commonkey={}
        if 'common' in groupcache[groupcounter]:
            commonkey=groupcache[groupcounter]['common']
        for c in cl:
            if c[0] in commonkey:
                outmess('analyzeline: previously defined common block encountered. Skipping.\n')
                continue
            commonkey[c[0]]=[]
            for i in [x.strip() for x in markoutercomma(c[1]).split('@,@')]:
                if i: commonkey[c[0]].append(i)
        groupcache[groupcounter]['common']=commonkey
        previous_context = ('common',bn,groupcounter)
    elif case=='use':
        # use <module> [, only: list | , local=>use, ...]
        m1=re.match(r'\A\s*(?P<name>\b[\w]+\b)\s*((,(\s*\bonly\b\s*:|(?P<notonly>))\s*(?P<list>.*))|)\s*\Z',m.group('after'),re.I)
        if m1:
            mm=m1.groupdict()
            if 'use' not in groupcache[groupcounter]:
                groupcache[groupcounter]['use']={}
            name=m1.group('name')
            groupcache[groupcounter]['use'][name]={}
            isonly=0
            if 'list' in mm and mm['list'] is not None:
                if 'notonly' in mm and mm['notonly'] is None:
                    isonly=1
                groupcache[groupcounter]['use'][name]['only']=isonly
                ll=[x.strip() for x in mm['list'].split(',')]
                rl={}
                for l in ll:
                    if '=' in l:
                        m2=re.match(r'\A\s*(?P<local>\b[\w]+\b)\s*=\s*>\s*(?P<use>\b[\w]+\b)\s*\Z',l,re.I)
                        if m2: rl[m2.group('local').strip()]=m2.group('use').strip()
                        else:
                            outmess('analyzeline: Not local=>use pattern found in %s\n'%`l`)
                    else:
                        rl[l]=l
                    groupcache[groupcounter]['use'][name]['map']=rl
            else:
                pass
        else:
            print m.groupdict()
            outmess('analyzeline: Could not crack the use statement.\n')
    elif case in ['f2pyenhancements']:
        # f2py directives (callstatement, usercode, ...); usercode entries
        # accumulate into a list.
        if 'f2pyenhancements' not in groupcache[groupcounter]:
            groupcache[groupcounter]['f2pyenhancements'] = {}
        d = groupcache[groupcounter]['f2pyenhancements']
        if m.group('this')=='usercode' and 'usercode' in d:
            if type(d['usercode']) is type(''):
                d['usercode'] = [d['usercode']]
            d['usercode'].append(m.group('after'))
        else:
            d[m.group('this')] = m.group('after')
    elif case=='multiline':
        # '''...''' block: attach to the most recent context.
        if previous_context is None:
            if verbose:
                outmess('analyzeline: No context for multiline block.\n')
            return
        gc = groupcounter
        #gc = previous_context[2]
        appendmultiline(groupcache[gc],
                        previous_context[:2],
                        m.group('this'))
    else:
        if verbose>1:
            print m.groupdict()
            outmess('analyzeline: No code implemented for line.\n')
def appendmultiline(group, context_name, ml):
    """Append multiline text *ml* under *context_name* in
    group['f2pymultilines'], creating the containers on first use."""
    store = group.setdefault('f2pymultilines', {})
    store.setdefault(context_name, []).append(ml)
    return
def cracktypespec0(typespec,ll):
    """Normalize a type specification and split off its selector.

    Returns (typespec, selector, attr, remainder) where selector is the
    raw kind/char selector text (or None), attr is the text before '::'
    (or None) and remainder is the rest of the declaration.  Returns None
    when no kind/char selector pattern can be matched.
    """
    selector = None
    attr = None
    # Canonicalize the spelling of the two-word double types.
    if re.match(r'double\s*complex', typespec, re.I):
        typespec = 'double complex'
    elif re.match(r'double\s*precision', typespec, re.I):
        typespec = 'double precision'
    else:
        typespec = typespec.strip().lower()
    sel_match = selectpattern.match(markouterparen(ll))
    if not sel_match:
        outmess('cracktypespec0: no kind/char_selector pattern found for line.\n')
        return
    parts = sel_match.groupdict()
    for key in parts.keys():
        parts[key] = unmarkouterparen(parts[key])
    if typespec in ['complex', 'integer', 'logical', 'real', 'character', 'type']:
        selector = parts['this']
        ll = parts['after']
    sep = ll.find('::')
    if sep >= 0:
        attr = ll[:sep].strip()
        ll = ll[sep + 2:]
    return typespec, selector, attr, ll
#####
# Regular expressions used by updatevars()/cracktypespec() to pick apart
# Fortran declarations.  The '@,@' / '@(@' / '@)@' tokens are the
# placeholders produced by markoutercomma()/markouterparen() for commas
# and parentheses at nesting depth zero.
# Leading identifier of an entity declaration plus the trailing text.
namepattern=re.compile(r'\s*(?P<name>\b[\w]+\b)\s*(?P<after>.*)\s*\Z',re.I)
# '(kind=<kind>)', '(<kind>)' or '*<kind2>' selector of a numeric type.
kindselector=re.compile(r'\s*(\(\s*(kind\s*=)?\s*(?P<kind>.*)\s*\)|[*]\s*(?P<kind2>.*?))\s*\Z',re.I)
# '(<lenkind>)' or '*<charlen>' selector of a character type.
charselector=re.compile(r'\s*(\((?P<lenkind>.*)\)|[*]\s*(?P<charlen>.*))\s*\Z',re.I)
# Interior of a character selector: len=/kind= in either order.
lenkindpattern=re.compile(r'\s*(kind\s*=\s*(?P<kind>.*?)\s*(@,@\s*len\s*=\s*(?P<len>.*)|)|(len\s*=\s*|)(?P<len2>.*?)\s*(@,@\s*(kind\s*=\s*|)(?P<kind2>.*)|))\s*\Z',re.I)
# Dimension/length/initializer tail of an entity declaration.
lenarraypattern=re.compile(r'\s*(@\(@\s*(?!/)\s*(?P<array>.*?)\s*@\)@\s*[*]\s*(?P<len>.*?)|([*]\s*(?P<len2>.*?)|)\s*(@\(@\s*(?!/)\s*(?P<array2>.*?)\s*@\)@|))\s*(=\s*(?P<init>.*?)|(@\(@|)/\s*(?P<init2>.*?)\s*/(@\)@|)|)\s*\Z',re.I)
def removespaces(expr):
    """Drop blanks that are adjacent to an operator, bracket or another
    blank; blanks between two ordinary tokens are preserved."""
    expr = expr.strip()
    if len(expr) <= 1:
        return expr
    special = "()[]{}=+-/* "
    kept = [expr[0]]
    for pos in range(1, len(expr) - 1):
        ch = expr[pos]
        # A space is redundant when either neighbour is special.
        if ch == ' ' and (expr[pos + 1] in special or expr[pos - 1] in special):
            continue
        kept.append(ch)
    kept.append(expr[-1])
    return ''.join(kept)
def markinnerspaces(line):
    """Replace blanks inside quoted strings of *line* with '@_@'.

    Backslash-escaped characters inside strings are copied verbatim.
    Bug fix: the original tracked string state with a counter that only
    ever incremented (its decrement branch `elif c==cc` repeated the same
    condition as the preceding `if` and was unreachable), so only the
    first quoted string on a line had its spaces protected.  Use a
    boolean toggle instead so every quoted string is handled.
    """
    result = ''
    instring = False       # currently inside a quoted string?
    quotechar = '\''       # quote character that opened the current string
    prev = ''              # previous character, for escape handling
    for c in line:
        if prev == '\\' and c in ['\\', '\'', '"']:
            # escaped character: copy verbatim, stay in current state
            result = result + c
            prev = c
            continue
        if not instring and c in ['\'', '"']:
            quotechar = c
        if c == quotechar:
            instring = not instring
        elif c == ' ' and instring:
            result = result + '@_@'
            continue
        result = result + c
        prev = c
    return result
def updatevars(typespec,selector,attrspec,entitydecl):
    """Merge one Fortran declaration statement into the current group's
    'vars' dictionary.

    typespec/selector/attrspec/entitydecl are the cracked pieces of a
    statement such as ``integer(kind=8), dimension(n) :: a, b(5)=0``.
    Conflicting re-declarations are reported via outmess/errmess and
    ignored.  Returns the last entity name processed (or None).
    """
    global groupcache,groupcounter
    last_name = None
    kindselect,charselect,typename=cracktypespec(typespec,selector)
    # Normalize attributes: lower-case only the leading keyword so that
    # e.g. 'DIMENSION(N)' becomes 'dimension(N)'.
    if attrspec:
        attrspec=[x.strip() for x in markoutercomma(attrspec).split('@,@')]
        l = []
        c = re.compile(r'(?P<start>[a-zA-Z]+)')
        for a in attrspec:
            if not a:
                continue
            m = c.match(a)
            if m:
                s = m.group('start').lower()
                a = s + a[len(s):]
            l.append(a)
        attrspec = l
    # Split the entity list on top-level commas; spaces inside quoted
    # strings were protected as '@_@' by markinnerspaces() and are
    # restored after the split.
    el=[x.strip() for x in markoutercomma(entitydecl).split('@,@')]
    el1=[]
    for e in el:
        for e1 in [x.strip() for x in markoutercomma(removespaces(markinnerspaces(e)),comma=' ').split('@ @')]:
            if e1: el1.append(e1.replace('@_@',' '))
    for e in el1:
        m=namepattern.match(e)
        if not m:
            outmess('updatevars: no name pattern found for entity=%s. Skipping.\n'%(`e`))
            continue
        ename=rmbadname1(m.group('name'))
        edecl={}
        if ename in groupcache[groupcounter]['vars']:
            # Entity already known: merge, warning about any conflicts.
            edecl=groupcache[groupcounter]['vars'][ename].copy()
            not_has_typespec = 'typespec' not in edecl
            if not_has_typespec:
                edecl['typespec']=typespec
            elif typespec and (not typespec==edecl['typespec']):
                outmess('updatevars: attempt to change the type of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typespec'],typespec))
            if 'kindselector' not in edecl:
                edecl['kindselector']=copy.copy(kindselect)
            elif kindselect:
                for k in kindselect.keys():
                    if k in edecl['kindselector'] and (not kindselect[k]==edecl['kindselector'][k]):
                        outmess('updatevars: attempt to change the kindselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['kindselector'][k],kindselect[k]))
                    else: edecl['kindselector'][k]=copy.copy(kindselect[k])
            if 'charselector' not in edecl and charselect:
                if not_has_typespec:
                    edecl['charselector']=charselect
                else:
                    errmess('updatevars:%s: attempt to change empty charselector to %r. Ignoring.\n' \
                            %(ename,charselect))
            elif charselect:
                for k in charselect.keys():
                    if k in edecl['charselector'] and (not charselect[k]==edecl['charselector'][k]):
                        outmess('updatevars: attempt to change the charselector "%s" of "%s" ("%s") to "%s". Ignoring.\n' % (k,ename,edecl['charselector'][k],charselect[k]))
                    else: edecl['charselector'][k]=copy.copy(charselect[k])
            if 'typename' not in edecl:
                edecl['typename']=typename
            elif typename and (not edecl['typename']==typename):
                outmess('updatevars: attempt to change the typename of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['typename'],typename))
            if 'attrspec' not in edecl:
                edecl['attrspec']=copy.copy(attrspec)
            elif attrspec:
                for a in attrspec:
                    if a not in edecl['attrspec']:
                        edecl['attrspec'].append(a)
        else:
            # Fresh entity: take everything from the current statement.
            edecl['typespec']=copy.copy(typespec)
            edecl['kindselector']=copy.copy(kindselect)
            edecl['charselector']=copy.copy(charselect)
            edecl['typename']=typename
            edecl['attrspec']=copy.copy(attrspec)
        if m.group('after'):
            # Crack the per-entity tail: '(dims)', '*len' and/or '=init'.
            m1=lenarraypattern.match(markouterparen(m.group('after')))
            if m1:
                d1=m1.groupdict()
                # Collapse the alternate regex groups (len2/array2/init2)
                # onto the primary names.
                for lk in ['len','array','init']:
                    if d1[lk+'2'] is not None: d1[lk]=d1[lk+'2']; del d1[lk+'2']
                for k in d1.keys():
                    if d1[k] is not None: d1[k]=unmarkouterparen(d1[k])
                    else: del d1[k]
                if 'len' in d1 and 'array' in d1:
                    if d1['len']=='':
                        d1['len']=d1['array']
                        del d1['array']
                    else:
                        # e.g. 'a(n)*m' -> treat the length as an extra
                        # dimension (old-style character arrays).
                        d1['array']=d1['array']+','+d1['len']
                        del d1['len']
                        errmess('updatevars: "%s %s" is mapped to "%s %s(%s)"\n'%(typespec,e,typespec,ename,d1['array']))
                if 'array' in d1:
                    dm = 'dimension(%s)'%d1['array']
                    if 'attrspec' not in edecl or (not edecl['attrspec']):
                        edecl['attrspec']=[dm]
                    else:
                        edecl['attrspec'].append(dm)
                        for dm1 in edecl['attrspec']:
                            if dm1[:9]=='dimension' and dm1!=dm:
                                del edecl['attrspec'][-1]
                                errmess('updatevars:%s: attempt to change %r to %r. Ignoring.\n' \
                                        % (ename,dm1,dm))
                                break
                if 'len' in d1:
                    # '*len' maps to a kind for numeric types and to a
                    # character length ('*') for character type.
                    if typespec in ['complex','integer','logical','real']:
                        if ('kindselector' not in edecl) or (not edecl['kindselector']):
                            edecl['kindselector']={}
                        edecl['kindselector']['*']=d1['len']
                    elif typespec == 'character':
                        if ('charselector' not in edecl) or (not edecl['charselector']):
                            edecl['charselector']={}
                        if 'len' in edecl['charselector']:
                            del edecl['charselector']['len']
                        edecl['charselector']['*']=d1['len']
                if 'init' in d1:
                    if '=' in edecl and (not edecl['=']==d1['init']):
                        outmess('updatevars: attempt to change the init expression of "%s" ("%s") to "%s". Ignoring.\n' % (ename,edecl['='],d1['init']))
                    else:
                        edecl['=']=d1['init']
            else:
                outmess('updatevars: could not crack entity declaration "%s". Ignoring.\n'%(ename+m.group('after')))
        # Drop empty entries before storing the declaration.
        for k in edecl.keys():
            if not edecl[k]:
                del edecl[k]
        groupcache[groupcounter]['vars'][ename]=edecl
        if 'varnames' in groupcache[groupcounter]:
            groupcache[groupcounter]['varnames'].append(ename)
        last_name = ename
    return last_name
def cracktypespec(typespec,selector):
    """Crack a type selector into its components.

    Returns (kindselect, charselect, typename):
      kindselect - dict with 'kind' and/or '*' keys for numeric types,
      charselect - dict with 'len'/'kind'/'*' keys for character type,
      typename   - name inside 'type(<name>)' for derived types.
    Unused components are None.  Returns None (implicitly) when the
    selector cannot be matched.
    """
    kindselect=None
    charselect=None
    typename=None
    if selector:
        if typespec in ['complex','integer','logical','real']:
            kindselect=kindselector.match(selector)
            if not kindselect:
                outmess('cracktypespec: no kindselector pattern found for %s\n'%(`selector`))
                return
            kindselect=kindselect.groupdict()
            # 'kind2' holds the '*<n>' form; expose it under '*'.
            kindselect['*']=kindselect['kind2']
            del kindselect['kind2']
            for k in kindselect.keys():
                if not kindselect[k]: del kindselect[k]
            for k,i in kindselect.items():
                kindselect[k] = rmbadname1(i)
        elif typespec=='character':
            charselect=charselector.match(selector)
            if not charselect:
                outmess('cracktypespec: no charselector pattern found for %s\n'%(`selector`))
                return
            charselect=charselect.groupdict()
            # 'charlen' holds the '*<n>' form; expose it under '*'.
            charselect['*']=charselect['charlen']
            del charselect['charlen']
            if charselect['lenkind']:
                # Crack '(len=...,kind=...)' (in either order/spelling).
                lenkind=lenkindpattern.match(markoutercomma(charselect['lenkind']))
                lenkind=lenkind.groupdict()
                for lk in ['len','kind']:
                    if lenkind[lk+'2']:
                        lenkind[lk]=lenkind[lk+'2']
                    charselect[lk]=lenkind[lk]
                    del lenkind[lk+'2']
            del charselect['lenkind']
            for k in charselect.keys():
                if not charselect[k]: del charselect[k]
            for k,i in charselect.items():
                charselect[k] = rmbadname1(i)
        elif typespec=='type':
            typename=re.match(r'\s*\(\s*(?P<name>\w+)\s*\)',selector,re.I)
            if typename: typename=typename.group('name')
            else: outmess('cracktypespec: no typename found in %s\n'%(`typespec+selector`))
        else:
            outmess('cracktypespec: no selector used for %s\n'%(`selector`))
    return kindselect,charselect,typename
######
def setattrspec(decl, attr, force=0):
    """Append attribute *attr* to the 'attrspec' list of *decl*.

    With force set, the attribute is appended unconditionally (possibly
    duplicating an existing entry).  Without force, an already-present
    attribute is not added again.  Note that every branch of the
    conflict chain below ends up appending, so conflicting pairs such as
    public/private are still both recorded (behavior preserved from the
    original).
    """
    if not decl:
        decl = {}
    if not attr:
        return decl
    if 'attrspec' not in decl:
        decl['attrspec'] = [attr]
        return decl
    if force:
        decl['attrspec'].append(attr)
    if attr in decl['attrspec']:
        return decl
    if attr == 'static' and 'automatic' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    elif attr == 'automatic' and 'static' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    elif attr == 'public' and 'private' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    elif attr == 'private' and 'public' not in decl['attrspec']:
        decl['attrspec'].append(attr)
    else:
        decl['attrspec'].append(attr)
    return decl
def setkindselector(decl, sel, force=0):
    """Merge kind-selector dict *sel* into declaration *decl*.

    Existing keys win unless *force* is set; an absent 'kindselector'
    entry is set to *sel* itself.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'kindselector' not in decl:
        decl['kindselector'] = sel
        return decl
    target = decl['kindselector']
    for key in sel.keys():
        if force or key not in target:
            target[key] = sel[key]
    return decl
def setcharselector(decl, sel, force=0):
    """Merge char-selector dict *sel* into declaration *decl*.

    Existing keys win unless *force* is set; an absent 'charselector'
    entry is set to *sel* itself.
    """
    if not decl:
        decl = {}
    if not sel:
        return decl
    if 'charselector' not in decl:
        decl['charselector'] = sel
        return decl
    target = decl['charselector']
    for key in sel.keys():
        if force or key not in target:
            target[key] = sel[key]
    return decl
def getblockname(block, unknown='unknown'):
    """Return the block's name, or *unknown* when it has none."""
    return block.get('name', unknown)
###### post processing
def setmesstext(block):
    """Update the global file-position banner used by diagnostic output.

    Silently keeps the previous banner when *block* lacks 'from'/'name'
    (or is not a mapping).  The original used a bare ``except:`` which
    also swallowed KeyboardInterrupt/SystemExit; narrowed to Exception.
    """
    global filepositiontext
    try:
        filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
    except Exception:
        pass
def get_usedict(block):
    """Collect the use-statements visible to *block*.

    Walks up through 'parent_block' links, with entries from inner
    blocks overriding inherited ones of the same name.
    """
    collected = {}
    if 'parent_block' in block:
        collected = get_usedict(block['parent_block'])
    if 'use' in block:
        collected.update(block['use'])
    return collected
def get_useparameters(block, param_map=None):
    """Collect PARAMETER values from the F90 modules used by *block*.

    Looks up each used module in the global f90modulevars cache and
    merges its parameter values into *param_map* (created when None).
    Rename mappings ('use m, a => b') are not implemented and only
    reported.  Returns the (possibly updated) param_map.
    """
    global f90modulevars
    if param_map is None:
        param_map = {}
    usedict = get_usedict(block)
    if not usedict:
        return param_map
    for usename,mapping in usedict.items():
        usename = usename.lower()
        if usename not in f90modulevars:
            outmess('get_useparameters: no module %s info used by %s\n' % (usename, block.get('name')))
            continue
        mvars = f90modulevars[usename]
        params = get_parameters(mvars)
        if not params:
            continue
        # XXX: apply mapping
        if mapping:
            errmess('get_useparameters: mapping for %s not impl.' % (mapping))
        for k,v in params.items():
            if k in param_map:
                outmess('get_useparameters: overriding parameter %s with'\
                        ' value from module %s' % (`k`,`usename`))
            param_map[k] = v
    return param_map
def postcrack2(block,tab='',param_map=None):
    """Second post-processing pass over cracked blocks.

    Substitutes kind parameters imported from used F90 modules into the
    kind-selectors of the block's variables, recursing into the body.
    A list of blocks is processed element-wise.  No-op unless the global
    f90modulevars cache has been populated.
    """
    global f90modulevars
    if not f90modulevars:
        return block
    if type(block)==types.ListType:
        ret = []
        for g in block:
            g = postcrack2(g,tab=tab+'\t',param_map=param_map)
            ret.append(g)
        return ret
    setmesstext(block)
    outmess('%sBlock: %s\n'%(tab,block['name']),0)
    if param_map is None:
        param_map = get_useparameters(block)
    if param_map is not None and 'vars' in block:
        vars = block['vars']
        for n in vars.keys():
            var = vars[n]
            if 'kindselector' in var:
                kind = var['kindselector']
                if 'kind' in kind:
                    val = kind['kind']
                    # Replace a symbolic kind by its imported value.
                    if val in param_map:
                        kind['kind'] = param_map[val]
    new_body = []
    for b in block['body']:
        b = postcrack2(b,tab=tab+'\t',param_map=param_map)
        new_body.append(b)
    block['body'] = new_body
    return block
def postcrack(block,args=None,tab=''):
    """
    First post-processing pass: analyze arguments, common blocks and
    variables of a cracked block, recurse into its body, and synthesize
    a '<name>__user__routines' python module block for any externals
    (callback arguments).

    TODO:
          function return values
          determine expression types if in argument list
    """
    global usermodules,onlyfunctions
    if type(block)==types.ListType:
        # Process a list of blocks, sorting user routines to appear first.
        gret=[]
        uret=[]
        for g in block:
            setmesstext(g)
            g=postcrack(g,tab=tab+'\t')
            if 'name' in g and '__user__' in g['name']: # sort user routines to appear first
                uret.append(g)
            else:
                gret.append(g)
        return uret+gret
    setmesstext(block)
    if (not type(block)==types.DictType) and 'block' not in block:
        raise Exception('postcrack: Expected block dictionary instead of ' + \
                        str(block))
    if 'name' in block and not block['name']=='unknown_interface':
        outmess('%sBlock: %s\n'%(tab,block['name']),0)
    blocktype=block['block']
    block=analyzeargs(block)
    block=analyzecommon(block)
    block['vars']=analyzevars(block)
    block['sortvars']=sortvarnames(block['vars'])
    if 'args' in block and block['args']:
        args=block['args']
    block['body']=analyzebody(block,args,tab=tab)
    userisdefined=[]
##     fromuser = []
    if 'use' in block:
        useblock=block['use']
        for k in useblock.keys():
            if '__user__' in k:
                userisdefined.append(k)
##             if 'map' in useblock[k]:
##                 for n in useblock[k]['map'].values():
##                     if n not in fromuser: fromuser.append(n)
    else: useblock={}
    name=''
    if 'name' in block:
        name=block['name']
    if 'externals' in block and block['externals']:# and not userisdefined: # Build a __user__ module
        # Collect interfaces/variables of external (callback) arguments
        # into a synthetic 'python module' block named <name>__user__routines.
        interfaced=[]
        if 'interfaced' in block:
            interfaced=block['interfaced']
        mvars=copy.copy(block['vars'])
        if name:
            mname=name+'__user__routines'
        else:
            mname='unknown__user__routines'
        if mname in userisdefined:
            # Avoid clashing with an explicitly defined user module name.
            i=1
            while '%s_%i'%(mname,i) in userisdefined: i=i+1
            mname='%s_%i'%(mname,i)
        interface={'block':'interface','body':[],'vars':{},'name':name+'_user_interface'}
        for e in block['externals']:
##             if e in fromuser:
##                 outmess('  Skipping %s that is defined explicitly in another use statement\n'%(`e`))
##                 continue
            if e in interfaced:
                # Move the external's interface body into the user module.
                edef=[]
                j=-1
                for b in block['body']:
                    j=j+1
                    if b['block']=='interface':
                        i=-1
                        for bb in b['body']:
                            i=i+1
                            if 'name' in bb and bb['name']==e:
                                edef=copy.copy(bb)
                                del b['body'][i]
                                break
                        if edef:
                            if not b['body']: del block['body'][j]
                            del interfaced[interfaced.index(e)]
                            break
                interface['body'].append(edef)
            else:
                if e in mvars and not isexternal(mvars[e]):
                    interface['vars'][e]=mvars[e]
        if interface['vars'] or interface['body']:
            block['interfaced']=interfaced
            mblock={'block':'python module','body':[interface],'vars':{},'name':mname,'interfaced':block['externals']}
            useblock[mname]={}
            usermodules.append(mblock)
    if useblock:
        block['use']=useblock
    return block
def sortvarnames(vars):
    """Order variable names so that each one follows its dependencies.

    Names without a 'depend' list come first (in dict order); the rest
    are rotated until their dependencies are satisfied.  When a full
    rotation cycle makes no progress a cyclic-dependency message is
    emitted and the remaining names are appended as-is.
    """
    independent = []
    pending = []
    for name in vars.keys():
        if 'depend' in vars[name] and vars[name]['depend']:
            pending.append(name)
        else:
            independent.append(name)
    limit = len(pending)
    rotations = 0
    while pending:  # XXX: How to catch dependence cycles correctly?
        candidate = pending[0]
        blocked = 0
        for other in pending[1:]:
            if other in vars[candidate]['depend']:
                blocked = 1
                break
        if blocked:
            # candidate still depends on a pending name: rotate it to
            # the back and try the next one.
            pending = pending[1:] + [candidate]
            rotations = rotations + 1
            if rotations > limit:
                errmess('sortvarnames: failed to compute dependencies because'
                        ' of cyclic dependencies between '
                        + ', '.join(pending) + '\n')
                independent = independent + pending
                break
        else:
            independent.append(candidate)
            pending = pending[1:]
            limit = len(pending)
            rotations = 0
    return independent
def analyzecommon(block):
    """Process COMMON block member lists of *block* in place.

    Each member '<name>[(<dims>)]' gets its dimensions attached to the
    corresponding entry in block['vars'] (creating it when absent), the
    common lists are reduced to bare names, and all member names are
    accumulated in block['commonvars'].  Returns the updated block.
    """
    if not hascommon(block): return block
    commonvars=[]
    for k in block['common'].keys():
        comvars=[]
        for e in block['common'][k]:
            # Split a member into name and optional dimension list.
            m=re.match(r'\A\s*\b(?P<name>.*?)\b\s*(\((?P<dims>.*?)\)|)\s*\Z',e,re.I)
            if m:
                dims=[]
                if m.group('dims'):
                    dims=[x.strip() for x in markoutercomma(m.group('dims')).split('@,@')]
                n=m.group('name').strip()
                if n in block['vars']:
                    if 'attrspec' in block['vars'][n]:
                        block['vars'][n]['attrspec'].append('dimension(%s)'%(','.join(dims)))
                    else:
                        block['vars'][n]['attrspec']=['dimension(%s)'%(','.join(dims))]
                else:
                    if dims:
                        block['vars'][n]={'attrspec':['dimension(%s)'%(','.join(dims))]}
                    else: block['vars'][n]={}
                if n not in commonvars: commonvars.append(n)
            else:
                # Keep the unparsable entry verbatim but report it.
                n=e
                errmess('analyzecommon: failed to extract "<name>[(<dims>)]" from "%s" in common /%s/.\n'%(e,k))
            comvars.append(n)
        block['common'][k]=comvars
    if 'commonvars' not in block:
        block['commonvars']=commonvars
    else:
        block['commonvars']=block['commonvars']+commonvars
    return block
def analyzebody(block,args,tab=''):
    """Post-process the sub-blocks of *block* and return the new body list.

    Functions/subroutines not listed in *args* (when given), listed in
    skipfuncs, or excluded by onlyfuncs are dropped.  Python-module
    sub-blocks are collected into the global usermodules list; F90
    module variables are cached in f90modulevars.
    """
    global usermodules,skipfuncs,onlyfuncs,f90modulevars
    setmesstext(block)
    body=[]
    for b in block['body']:
        b['parent_block'] = block
        if b['block'] in ['function','subroutine']:
            if args is not None and b['name'] not in args:
                continue
            else:
                as_=b['args']
            if b['name'] in skipfuncs:
                continue
            if onlyfuncs and b['name'] not in onlyfuncs:
                continue
            # Keep a pretty-printed copy of the routine's interface for
            # later use (e.g. documentation generation).
            b['saved_interface'] = crack2fortrangen(b, '\n'+' '*6, as_interface=True)
        else: as_=args
        b=postcrack(b,as_,tab=tab+'\t')
        if b['block']=='interface' and not b['body']:
            # Drop empty interfaces unless they carry f2py directives.
            if 'f2pyenhancements' not in b:
                continue
        if b['block'].replace(' ','')=='pythonmodule':
            usermodules.append(b)
        else:
            if b['block']=='module':
                f90modulevars[b['name']] = b['vars']
            body.append(b)
    return body
def buildimplicitrules(block):
    """Return (implicitrules, attrrules) in effect for *block*.

    implicitrules maps a first letter to its implicit type dict (None
    for 'implicit none'); attrrules maps a letter to 'static'/'automatic'
    attributes declared via implicit statements.
    """
    setmesstext(block)
    implicitrules=defaultimplicitrules
    # NOTE(review): this aliases the module-level defaultimplicitrules
    # (no copy), so the assignments below mutate the shared default for
    # all subsequently processed blocks as well -- confirm whether that
    # leakage is intended before changing.
    attrrules={}
    if 'implicit' in block:
        if block['implicit'] is None:
            implicitrules=None
            if verbose>1:
                outmess('buildimplicitrules: no implicit rules for routine %s.\n'%`block['name']`)
        else:
            for k in block['implicit'].keys():
                if block['implicit'][k].get('typespec') not in ['static','automatic']:
                    implicitrules[k]=block['implicit'][k]
                else:
                    attrrules[k]=block['implicit'][k]['typespec']
    return implicitrules,attrrules
def myeval(e, g=None, l=None):
    """Evaluate expression *e* and return it only if the result is a
    plain int or float; raise ValueError otherwise.

    Note: the exact ``type(...) in [...]`` check is kept so that e.g.
    bool results are rejected, matching the original behavior.
    """
    result = eval(e, g, l)
    if type(result) in [type(0), type(0.0)]:
        return result
    raise ValueError('r=%r' % (result))
# Matches a bare identifier spanning the whole string; used by getlincoef().
getlincoef_re_1 = re.compile(r'\A\b\w+\b\Z',re.I)
def getlincoef(e, xset): # e = a*x+b ; x in xset
    """Try to decompose expression *e* as a linear form ``a*x + b`` for
    some variable name x in *xset*.

    Returns (a, b, x) on success, (0, c, None) for a constant integer
    expression, (1, 0, e) when e is itself a bare identifier, and
    (None, None, None) when no linear form is found.

    Fix: the two bare ``except:`` clauses were narrowed to
    ``except Exception`` so that KeyboardInterrupt/SystemExit are no
    longer swallowed; evaluation failures are handled exactly as before.
    """
    try:
        c = int(myeval(e, {}, {}))
        return 0, c, None
    except Exception:
        # not a plain integer constant; fall through to probing
        pass
    if getlincoef_re_1.match(e):
        return 1, 0, e
    len_e = len(e)
    for x in xset:
        if len(x) > len_e:
            continue
        if re.search(r'\w\s*\([^)]*\b' + x + r'\b', e):
            # skip function calls having x as an argument, e.g max(1, x)
            continue
        re_1 = re.compile(r'(?P<before>.*?)\b' + x + r'\b(?P<after>.*)', re.I)
        m = re_1.match(e)
        if m:
            try:
                # Probe the expression at x = 0, 1, 0.5 and 1.5; a linear
                # form must satisfy f(t) == a*t + b at all probe points.
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (m1.group('before'), 0, m1.group('after'))
                    m1 = re_1.match(ee)
                b = myeval(ee, {}, {})
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (m1.group('before'), 1, m1.group('after'))
                    m1 = re_1.match(ee)
                a = myeval(ee, {}, {}) - b
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (m1.group('before'), 0.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c = myeval(ee, {}, {})
                # computing another point to be sure that expression is linear
                m1 = re_1.match(e)
                while m1:
                    ee = '%s(%s)%s' % (m1.group('before'), 1.5, m1.group('after'))
                    m1 = re_1.match(ee)
                c2 = myeval(ee, {}, {})
                if (a * 0.5 + b == c and a * 1.5 + b == c2):
                    return a, b, x
            except Exception:
                pass
            break
    return None, None, None
# Matches a plain lower-case variable name; used by getarrlen() diagnostics.
_varname_match = re.compile(r'\A[a-z]\w*\Z').match
def getarrlen(dl,args,star='*'):
    """Build a symbolic expression for the length of a dimension range.

    dl is a [lower, upper] bound pair (strings); args are the names that
    may legitimately appear in the result.  Returns a 3-tuple
    (length_expr, var, suffix) where var/suffix, when not None, allow
    inverting the expression to recover var (used for depend/check
    generation).  Constant expressions are evaluated to their repr.
    """
    edl = []
    # Evaluate each bound when it is a constant, else keep its text.
    try: edl.append(myeval(dl[0],{},{}))
    except: edl.append(dl[0])
    try: edl.append(myeval(dl[1],{},{}))
    except: edl.append(dl[1])
    if type(edl[0]) is type(0):
        # lower bound is constant: length = upper + (1 - lower)
        p1 = 1-edl[0]
        if p1==0: d = str(dl[1])
        elif p1<0: d = '%s-%s'%(dl[1],-p1)
        else: d = '%s+%s'%(dl[1],p1)
    elif type(edl[1]) is type(0):
        # upper bound is constant: length = (upper + 1) - lower
        p1 = 1+edl[1]
        if p1==0: d='-(%s)' % (dl[0])
        else: d='%s-(%s)' % (p1,dl[0])
    else: d = '%s-(%s)+1'%(dl[1],dl[0])
    try: return `myeval(d,{},{})`,None,None
    except: pass
    # Both bounds may be linear in a single argument; compute coefficients.
    d1,d2=getlincoef(dl[0],args),getlincoef(dl[1],args)
    if None not in [d1[0],d2[0]]:
        if (d1[0],d2[0])==(0,0):
            return `d2[1]-d1[1]+1`,None,None
        b = d2[1] - d1[1] + 1
        d1 = (d1[0],0,d1[2])
        d2 = (d2[0],b,d2[2])
        if d1[0]==0 and d2[2] in args:
            # length = d2[0]*var + b, invertible for var.
            if b<0: return '%s * %s - %s'%(d2[0],d2[2],-b),d2[2],'+%s)/(%s)'%(-b,d2[0])
            elif b: return '%s * %s + %s'%(d2[0],d2[2],b),d2[2],'-%s)/(%s)'%(b,d2[0])
            else: return '%s * %s'%(d2[0],d2[2]),d2[2],')/(%s)'%(d2[0])
        if d2[0]==0 and d1[2] in args:
            if b<0: return '%s * %s - %s'%(-d1[0],d1[2],-b),d1[2],'+%s)/(%s)'%(-b,-d1[0])
            elif b: return '%s * %s + %s'%(-d1[0],d1[2],b),d1[2],'-%s)/(%s)'%(b,-d1[0])
            else: return '%s * %s'%(-d1[0],d1[2]),d1[2],')/(%s)'%(-d1[0])
        if d1[2]==d2[2] and d1[2] in args:
            # Same variable in both bounds.
            a = d2[0] - d1[0]
            if not a: return `b`,None,None
            if b<0: return '%s * %s - %s'%(a,d1[2],-b),d2[2],'+%s)/(%s)'%(-b,a)
            elif b: return '%s * %s + %s'%(a,d1[2],b),d2[2],'-%s)/(%s)'%(b,a)
            else: return '%s * %s'%(a,d1[2]),d2[2],')/(%s)'%(a)
        # Remaining cases: build a textual expression; undefined variables
        # are reported and parenthesized.
        if d1[0]==d2[0]==1:
            c = str(d1[2])
            if c not in args:
                if _varname_match(c):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c))
                c = '(%s)'%c
            if b==0: d='%s-%s' % (d2[2],c)
            elif b<0: d='%s-%s-%s' % (d2[2],c,-b)
            else: d='%s-%s+%s' % (d2[2],c,b)
        elif d1[0]==0:
            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)'%c2
            if d2[0]==1: pass
            elif d2[0]==-1: c2='-%s' %c2
            else: c2='%s*%s'%(d2[0],c2)
            if b==0: d=c2
            elif b<0: d='%s-%s' % (c2,-b)
            else: d='%s+%s' % (c2,b)
        elif d2[0]==0:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)'%c1
            if d1[0]==1: c1='-%s'%c1
            elif d1[0]==-1: c1='+%s'%c1
            elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1)
            else: c1 = '-%s*%s' % (d1[0],c1)
            if b==0: d=c1
            elif b<0: d='%s-%s' % (c1,-b)
            else: d='%s+%s' % (c1,b)
        else:
            c1 = str(d1[2])
            if c1 not in args:
                if _varname_match(c1):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c1))
                c1 = '(%s)'%c1
            if d1[0]==1: c1='-%s'%c1
            elif d1[0]==-1: c1='+%s'%c1
            elif d1[0]<0: c1='+%s*%s'%(-d1[0],c1)
            else: c1 = '-%s*%s' % (d1[0],c1)
            c2 = str(d2[2])
            if c2 not in args:
                if _varname_match(c2):
                    outmess('\tgetarrlen:variable "%s" undefined\n' % (c2))
                c2 = '(%s)'%c2
            if d2[0]==1: pass
            elif d2[0]==-1: c2='-%s' %c2
            else: c2='%s*%s'%(d2[0],c2)
            if b==0: d='%s%s' % (c2,c1)
            elif b<0: d='%s%s-%s' % (c2,c1,-b)
            else: d='%s%s+%s' % (c2,c1,b)
    return d,None,None
# Matches identifier-like words (letters, digits, '_' and '$') when scanning
# '=' initializer expressions for variable dependencies.
word_pattern = re.compile(r'\b[a-z][\w$]*\b',re.I)
def _get_depend_dict(name, vars, deps):
    """Compute (and cache in *deps*) the transitive dependency list of
    variable *name*; returns the list."""
    if name in vars:
        words = vars[name].get('depend',[])
        # Names referenced by the '=' initializer also count as
        # dependencies (except for string-valued variables).
        if '=' in vars[name] and not isstring(vars[name]):
            for word in word_pattern.findall(vars[name]['=']):
                if word not in words and word in vars:
                    words.append(word)
        # Expand transitively, reusing any previously cached entries.
        for word in words[:]:
            for w in deps.get(word,[]) \
                    or _get_depend_dict(word, vars, deps):
                if w not in words:
                    words.append(w)
    else:
        outmess('_get_depend_dict: no dependence info for %s\n' % (`name`))
        words = []
    deps[name] = words
    return words
def _calc_depend_dict(vars):
    """Return the full transitive dependency map for every variable in
    *vars* (name -> list of names it depends on)."""
    deps = {}
    for name in vars.keys():
        _get_depend_dict(name, vars, deps)
    return deps
def get_sorted_names(vars):
    """Return the names in *vars* topologically sorted so that every
    name appears after the names it depends on.

    Fixes two robustness problems in the original: (1) it deleted dict
    keys while iterating the live key/item views (only safe on Python 2
    where keys()/items() return lists) -- iterate over snapshots
    instead; (2) it looped forever when the dependency graph contained a
    cycle -- detect a pass with no progress, append the remaining names
    in arbitrary order and stop.
    """
    depend_dict = _calc_depend_dict(vars)
    names = []
    # Seed with names that have no dependencies at all.
    for name in list(depend_dict.keys()):
        if not depend_dict[name]:
            names.append(name)
            del depend_dict[name]
    while depend_dict:
        progressed = False
        for name, lst in list(depend_dict.items()):
            new_lst = [n for n in lst if n in depend_dict]
            if not new_lst:
                names.append(name)
                del depend_dict[name]
                progressed = True
            else:
                depend_dict[name] = new_lst
        if not progressed:
            # Cyclic dependencies: emit the leftovers instead of hanging.
            names.extend(list(depend_dict.keys()))
            break
    return [name for name in names if name in vars]
def _kind_func(string):
    """Crude emulation of Fortran's KIND() intrinsic for parameter
    evaluation.  #XXX: return something sensible."""
    text = string
    if text[0] in "'\"":
        # strip surrounding quotes
        text = text[1:-1]
    if real16pattern.match(text):
        return 8
    if real8pattern.match(text):
        return 4
    return 'kind(' + text + ')'
def _selected_int_kind_func(r):
#XXX: This should be processor dependent
m = 10**r
if m<=2**8: return 1
if m<=2**16: return 2
if m<=2**32: return 4
if m<=2**63: return 8
if m<=2**128: return 16
return -1
def _selected_real_kind_func(p, r=0, radix=0):
#XXX: This should be processor dependent
# This is only good for 0 <= p <= 20
if p < 7: return 4
if p < 16: return 8
if platform.machine().lower().startswith('power'):
if p <= 20:
return 16
else:
if p < 19:
return 10
elif p <= 20:
return 16
return -1
def get_parameters(vars, global_params={}):
    """Evaluate PARAMETER variables in *vars* and return a name->value map.

    Values are evaluated with Python's eval() using emulations of the
    kind/selected_*_kind intrinsics; Fortran literal spellings
    (.true./.false., kind suffixes, d-exponents) are rewritten to
    Python syntax first.  Unevaluable values are kept as text.
    """
    params = copy.copy(global_params)
    g_params = copy.copy(global_params)
    # Make the kind intrinsics available to eval() without clobbering
    # caller-supplied names.
    for name,func in [('kind',_kind_func),
                      ('selected_int_kind',_selected_int_kind_func),
                      ('selected_real_kind',_selected_real_kind_func),
                      ]:
        if name not in g_params:
            g_params[name] = func
    param_names = []
    for n in get_sorted_names(vars):
        if 'attrspec' in vars[n] and 'parameter' in vars[n]['attrspec']:
            param_names.append(n)
    kind_re = re.compile(r'\bkind\s*\(\s*(?P<value>.*)\s*\)',re.I)
    selected_int_kind_re = re.compile(r'\bselected_int_kind\s*\(\s*(?P<value>.*)\s*\)',re.I)
    selected_kind_re = re.compile(r'\bselected_(int|real)_kind\s*\(\s*(?P<value>.*)\s*\)',re.I)
    for n in param_names:
        if '=' in vars[n]:
            v = vars[n]['=']
            if islogical(vars[n]):
                v = v.lower()
                for repl in [
                    ('.false.','False'),
                    ('.true.','True'),
                    #TODO: test .eq., .neq., etc replacements.
                    ]:
                    v = v.replace(*repl)
            v = kind_re.sub(r'kind("\1")',v)
            v = selected_int_kind_re.sub(r'selected_int_kind(\1)',v)
            if isinteger(vars[n]) and not selected_kind_re.match(v):
                # drop a kind suffix such as '_8' from integer literals
                v = v.split('_')[0]
            if isdouble(vars[n]):
                # rewrite Fortran d-exponents ('1d0') to Python 'e' form
                tt = list(v)
                for m in real16pattern.finditer(v):
                    tt[m.start():m.end()] = list(\
                            v[m.start():m.end()].lower().replace('d', 'e'))
                v = ''.join(tt)
            if iscomplex(vars[n]):
                # NOTE(review): 'l' is computed but never used below --
                # complex parameter handling looks unfinished; verify.
                if v[0]=='(' and v[-1]==')':
                    l = markoutercomma(v[1:-1]).split('@,@')
            try:
                params[n] = eval(v,g_params,params)
            except Exception,msg:
                params[n] = v
                #print params
                outmess('get_parameters: got "%s" on %s\n' % (msg,`v`))
            if isstring(vars[n]) and type(params[n]) is type(0):
                params[n] = chr(params[n])
            nl = n.lower()
            if nl!=n:
                # make the value reachable under the lower-cased name too
                params[nl] = params[n]
        else:
            print vars[n]
            outmess('get_parameters:parameter %s does not have value?!\n'%(`n`))
    return params
def _eval_length(length,params):
if length in ['(:)','(*)','*']:
return '(*)'
return _eval_scalar(length,params)
# Matches Fortran kind-suffixed numeric literals such as '123_8'.
_is_kind_number = re.compile(r'\d+_').match
def _eval_scalar(value,params):
    """Evaluate scalar expression *value* using parameter dict *params*;
    returns the result as a string, or the original text when it cannot
    be evaluated (undefined names / syntax errors)."""
    if _is_kind_number(value):
        # strip a Fortran kind suffix, e.g. '123_8' -> '123'
        value = value.split('_')[0]
    try:
        value = str(eval(value,{},params))
    except (NameError, SyntaxError):
        return value
    except Exception,msg:
        errmess('"%s" in evaluating %r '\
                '(available names: %s)\n' \
                % (msg,value,params.keys()))
    return value
def analyzevars(block):
global f90modulevars
setmesstext(block)
implicitrules,attrrules=buildimplicitrules(block)
vars=copy.copy(block['vars'])
if block['block']=='function' and block['name'] not in vars:
vars[block['name']]={}
if '' in block['vars']:
del vars['']
if 'attrspec' in block['vars']['']:
gen=block['vars']['']['attrspec']
for n in vars.keys():
for k in ['public','private']:
if k in gen:
vars[n]=setattrspec(vars[n],k)
svars=[]
args = block['args']
for a in args:
try:
vars[a]
svars.append(a)
except KeyError:
pass
for n in vars.keys():
if n not in args: svars.append(n)
params = get_parameters(vars, get_useparameters(block))
dep_matches = {}
name_match = re.compile(r'\w[\w\d_$]*').match
for v in vars.keys():
m = name_match(v)
if m:
n = v[m.start():m.end()]
try:
dep_matches[n]
except KeyError:
dep_matches[n] = re.compile(r'.*\b%s\b'%(v),re.I).match
for n in svars:
if n[0] in attrrules.keys():
vars[n]=setattrspec(vars[n],attrrules[n[0]])
if 'typespec' not in vars[n]:
if not('attrspec' in vars[n] and 'external' in vars[n]['attrspec']):
if implicitrules:
ln0 = n[0].lower()
for k in implicitrules[ln0].keys():
if k=='typespec' and implicitrules[ln0][k]=='undefined':
continue
if k not in vars[n]:
vars[n][k]=implicitrules[ln0][k]
elif k=='attrspec':
for l in implicitrules[ln0][k]:
vars[n]=setattrspec(vars[n],l)
elif n in block['args']:
outmess('analyzevars: typespec of variable %s is not defined in routine %s.\n'%(`n`,block['name']))
if 'charselector' in vars[n]:
if 'len' in vars[n]['charselector']:
l = vars[n]['charselector']['len']
try:
l = str(eval(l,{},params))
except:
pass
vars[n]['charselector']['len'] = l
if 'kindselector' in vars[n]:
if 'kind' in vars[n]['kindselector']:
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l,{},params))
except:
pass
vars[n]['kindselector']['kind'] = l
savelindims = {}
if 'attrspec' in vars[n]:
attr=vars[n]['attrspec']
attr.reverse()
vars[n]['attrspec']=[]
dim,intent,depend,check,note=None,None,None,None,None
for a in attr:
if a[:9]=='dimension': dim=(a[9:].strip())[1:-1]
elif a[:6]=='intent': intent=(a[6:].strip())[1:-1]
elif a[:6]=='depend': depend=(a[6:].strip())[1:-1]
elif a[:5]=='check': check=(a[5:].strip())[1:-1]
elif a[:4]=='note': note=(a[4:].strip())[1:-1]
else: vars[n]=setattrspec(vars[n],a)
if intent:
if 'intent' not in vars[n]:
vars[n]['intent']=[]
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
if not c in vars[n]['intent']:
vars[n]['intent'].append(c)
intent=None
if note:
note=note.replace('\\n\\n','\n\n')
note=note.replace('\\n ','\n')
if 'note' not in vars[n]:
vars[n]['note']=[note]
else:
vars[n]['note'].append(note)
note=None
if depend is not None:
if 'depend' not in vars[n]:
vars[n]['depend']=[]
for c in rmbadname([x.strip() for x in markoutercomma(depend).split('@,@')]):
if c not in vars[n]['depend']:
vars[n]['depend'].append(c)
depend=None
if check is not None:
if 'check' not in vars[n]:
vars[n]['check']=[]
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
if not c in vars[n]['check']:
vars[n]['check'].append(c)
check=None
if dim and 'dimension' not in vars[n]:
vars[n]['dimension']=[]
for d in rmbadname([x.strip() for x in markoutercomma(dim).split('@,@')]):
star = '*'
if d==':':
star=':'
if d in params:
d = str(params[d])
for p in params.keys():
m = re.match(r'(?P<before>.*?)\b'+p+r'\b(?P<after>.*)',d,re.I)
if m:
#outmess('analyzevars:replacing parameter %s in %s (dimension of %s) with %s\n'%(`p`,`d`,`n`,`params[p]`))
d = m.group('before')+str(params[p])+m.group('after')
if d==star:
dl = [star]
else:
dl=markoutercomma(d,':').split('@:@')
if len(dl)==2 and '*' in dl: # e.g. dimension(5:*)
dl = ['*']
d = '*'
if len(dl)==1 and not dl[0]==star: dl = ['1',dl[0]]
if len(dl)==2:
d,v,di = getarrlen(dl,block['vars'].keys())
if d[:4] == '1 * ': d = d[4:]
if di and di[-4:] == '/(1)': di = di[:-4]
if v: savelindims[d] = v,di
vars[n]['dimension'].append(d)
if 'dimension' in vars[n]:
if isintent_c(vars[n]):
shape_macro = 'shape'
else:
shape_macro = 'shape'#'fshape'
if isstringarray(vars[n]):
if 'charselector' in vars[n]:
d = vars[n]['charselector']
if '*' in d:
d = d['*']
errmess('analyzevars: character array "character*%s %s(%s)" is considered as "character %s(%s)"; "intent(c)" is forced.\n'\
%(d,n,
','.join(vars[n]['dimension']),
n,','.join(vars[n]['dimension']+[d])))
vars[n]['dimension'].append(d)
del vars[n]['charselector']
if 'intent' not in vars[n]:
vars[n]['intent'] = []
if 'c' not in vars[n]['intent']:
vars[n]['intent'].append('c')
else:
errmess("analyzevars: charselector=%r unhandled." % (d))
if 'check' not in vars[n] and 'args' in block and n in block['args']:
flag = 'depend' not in vars[n]
if flag:
vars[n]['depend']=[]
vars[n]['check']=[]
if 'dimension' in vars[n]:
#/----< no check
#vars[n]['check'].append('rank(%s)==%s'%(n,len(vars[n]['dimension'])))
i=-1; ni=len(vars[n]['dimension'])
for d in vars[n]['dimension']:
ddeps=[] # dependecies of 'd'
ad=''
pd=''
#origd = d
if d not in vars:
if d in savelindims:
pd,ad='(',savelindims[d][1]
d = savelindims[d][0]
else:
for r in block['args']:
#for r in block['vars'].keys():
if r not in vars:
continue
if re.match(r'.*?\b'+r+r'\b',d,re.I):
ddeps.append(r)
if d in vars:
if 'attrspec' in vars[d]:
for aa in vars[d]['attrspec']:
if aa[:6]=='depend':
ddeps += aa[6:].strip()[1:-1].split(',')
if 'depend' in vars[d]:
ddeps=ddeps+vars[d]['depend']
i=i+1
if d in vars and ('depend' not in vars[d]) \
and ('=' not in vars[d]) and (d not in vars[n]['depend']) \
and l_or(isintent_in, isintent_inout, isintent_inplace)(vars[n]):
vars[d]['depend']=[n]
if ni>1:
vars[d]['=']='%s%s(%s,%s)%s'% (pd,shape_macro,n,i,ad)
else:
vars[d]['=']='%slen(%s)%s'% (pd,n,ad)
# /---< no check
if 1 and 'check' not in vars[d]:
if ni>1:
vars[d]['check']=['%s%s(%s,%i)%s==%s'\
%(pd,shape_macro,n,i,ad,d)]
else:
vars[d]['check']=['%slen(%s)%s>=%s'%(pd,n,ad,d)]
if 'attrspec' not in vars[d]:
vars[d]['attrspec']=['optional']
if ('optional' not in vars[d]['attrspec']) and\
('required' not in vars[d]['attrspec']):
vars[d]['attrspec'].append('optional')
elif d not in ['*',':']:
#/----< no check
#if ni>1: vars[n]['check'].append('shape(%s,%i)==%s'%(n,i,d))
#else: vars[n]['check'].append('len(%s)>=%s'%(n,d))
if flag:
if d in vars:
if n not in ddeps:
vars[n]['depend'].append(d)
else:
vars[n]['depend'] = vars[n]['depend'] + ddeps
elif isstring(vars[n]):
length='1'
if 'charselector' in vars[n]:
if '*' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['*'],
params)
vars[n]['charselector']['*']=length
elif 'len' in vars[n]['charselector']:
length = _eval_length(vars[n]['charselector']['len'],
params)
del vars[n]['charselector']['len']
vars[n]['charselector']['*']=length
if not vars[n]['check']:
del vars[n]['check']
if flag and not vars[n]['depend']:
del vars[n]['depend']
if '=' in vars[n]:
if 'attrspec' not in vars[n]:
vars[n]['attrspec']=[]
if ('optional' not in vars[n]['attrspec']) and \
('required' not in vars[n]['attrspec']):
vars[n]['attrspec'].append('optional')
if 'depend' not in vars[n]:
vars[n]['depend']=[]
for v,m in dep_matches.items():
if m(vars[n]['=']): vars[n]['depend'].append(v)
if not vars[n]['depend']: del vars[n]['depend']
if isscalar(vars[n]):
vars[n]['='] = _eval_scalar(vars[n]['='],params)
for n in vars.keys():
if n==block['name']: # n is block name
if 'note' in vars[n]:
block['note']=vars[n]['note']
if block['block']=='function':
if 'result' in block and block['result'] in vars:
vars[n]=appenddecl(vars[n],vars[block['result']])
if 'prefix' in block:
pr=block['prefix']; ispure=0; isrec=1
pr1=pr.replace('pure','')
ispure=(not pr==pr1)
pr=pr1.replace('recursive','')
isrec=(not pr==pr1)
m=typespattern[0].match(pr)
if m:
typespec,selector,attr,edecl=cracktypespec0(m.group('this'),m.group('after'))
kindselect,charselect,typename=cracktypespec(typespec,selector)
vars[n]['typespec']=typespec
if kindselect:
if 'kind' in kindselect:
try:
kindselect['kind'] = eval(kindselect['kind'],{},params)
except:
pass
vars[n]['kindselector']=kindselect
if charselect: vars[n]['charselector']=charselect
if typename: vars[n]['typename']=typename
if ispure: vars[n]=setattrspec(vars[n],'pure')
if isrec: vars[n]=setattrspec(vars[n],'recursive')
else:
outmess('analyzevars: prefix (%s) were not used\n'%`block['prefix']`)
if not block['block'] in ['module','pythonmodule','python module','block data']:
if 'commonvars' in block:
neededvars=copy.copy(block['args']+block['commonvars'])
else:
neededvars=copy.copy(block['args'])
for n in vars.keys():
if l_or(isintent_callback,isintent_aux)(vars[n]):
neededvars.append(n)
if 'entry' in block:
neededvars.extend(block['entry'].keys())
for k in block['entry'].keys():
for n in block['entry'][k]:
if n not in neededvars:
neededvars.append(n)
if block['block']=='function':
if 'result' in block:
neededvars.append(block['result'])
else:
neededvars.append(block['name'])
if block['block'] in ['subroutine','function']:
name = block['name']
if name in vars and 'intent' in vars[name]:
block['intent'] = vars[name]['intent']
if block['block'] == 'type':
neededvars.extend(vars.keys())
for n in vars.keys():
if n not in neededvars:
del vars[n]
return vars
analyzeargs_re_1 = re.compile(r'\A[a-z]+[\w$]*\Z',re.I)
def expr2name(a, block, args=None):
    """Map the argument ``a`` of ``block`` to a usable variable name.

    If ``a`` is already a plain identifier it is de-collided against the
    names in ``args``; if it is an expression, a synthetic ``e_..._e``
    name is generated and registered in ``block['vars']``.

    BUG FIX: the original signature used the mutable default ``args=[]``.
    The list is only read here, so no state actually leaked, but the
    None-sentinel form is the safe idiom and removes the hazard.
    """
    if args is None:
        args = []
    orig_a = a
    a_is_expr = not analyzeargs_re_1.match(a)
    if a_is_expr: # `a` is an expression
        implicitrules,attrrules=buildimplicitrules(block)
        at=determineexprtype(a,block['vars'],implicitrules)
        # Build a synthetic name from the expression text: e_<sanitized>_e.
        na='e_'
        for c in a:
            c = c.lower()
            if c not in string.ascii_lowercase+string.digits: c='_'
            na=na+c
        if na[-1]=='_': na=na+'e'
        else: na=na+'_e'
        a=na
        # Avoid clashing with existing variable/argument names.
        while a in block['vars'] or a in block['args']:
            a=a+'r'
    if a in args:
        # De-collide against names already taken by earlier arguments.
        k = 1
        while a + str(k) in args:
            k = k + 1
        a = a + str(k)
    if a_is_expr:
        block['vars'][a]=at
    else:
        if a not in block['vars']:
            if orig_a in block['vars']:
                block['vars'][a] = block['vars'][orig_a]
            else:
                block['vars'][a]={}
        if 'externals' in block and orig_a in block['externals']+block['interfaced']:
            block['vars'][a]=setattrspec(block['vars'][a],'external')
    return a
def analyzeargs(block):
    'Normalize the argument list of *block* and register entry/body names.'
    setmesstext(block)
    implicitrules, attrrules = buildimplicitrules(block)
    if 'args' not in block:
        block['args'] = []
    # Resolve each argument (expressions get synthetic names) while
    # de-colliding against the arguments resolved so far.
    resolved = []
    for arg in block['args']:
        resolved.append(expr2name(arg, block, resolved))
    block['args'] = resolved
    # Every entry-point argument must have at least an empty vars record.
    if 'entry' in block:
        for entryname, entryargs in block['entry'].items():
            for arg in entryargs:
                if arg not in block['vars']:
                    block['vars'][arg] = {}
    # Sub-blocks whose name is also an argument are external procedures.
    for child in block['body']:
        if child['name'] in resolved:
            externals = block.setdefault('externals', [])
            if child['name'] not in externals:
                externals.append(child['name'])
    if 'result' in block and block['result'] not in block['vars']:
        block['vars'][block['result']] = {}
    return block
determineexprtype_re_1 = re.compile(r'\A\(.+?[,].+?\)\Z',re.I)
determineexprtype_re_2 = re.compile(r'\A[+-]?\d+(_(P<name>[\w]+)|)\Z',re.I)
determineexprtype_re_3 = re.compile(r'\A[+-]?[\d.]+[\d+-de.]*(_(P<name>[\w]+)|)\Z',re.I)
determineexprtype_re_4 = re.compile(r'\A\(.*\)\Z',re.I)
determineexprtype_re_5 = re.compile(r'\A(?P<name>\w+)\s*\(.*?\)\s*\Z',re.I)
def _ensure_exprdict(r):
if type(r) is type(0):
return {'typespec':'integer'}
if type(r) is type(0.0):
return {'typespec':'real'}
if type(r) is type(0j):
return {'typespec':'complex'}
assert type(r) is type({}),`r`
return r
def determineexprtype(expr,vars,rules={}):
    """Determine the Fortran type of the expression *expr*.

    *vars* maps known names to their type dictionaries; *rules* are the
    implicit typing rules keyed by the first letter of a name (read-only
    here, so the shared default dict is harmless).

    FIX: the Python-2-only backtick repr syntax in the outmess() calls is
    replaced by repr(); the emitted messages are unchanged.
    """
    if expr in vars:
        return _ensure_exprdict(vars[expr])
    expr=expr.strip()
    # '(re, im)' literal => complex.
    if determineexprtype_re_1.match(expr):
        return {'typespec':'complex'}
    m=determineexprtype_re_2.match(expr)
    if m:
        if 'name' in m.groupdict() and m.group('name'):
            outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
        return {'typespec':'integer'}
    m = determineexprtype_re_3.match(expr)
    if m:
        if 'name' in m.groupdict() and m.group('name'):
            outmess('determineexprtype: selected kind types not supported (%s)\n'%repr(expr))
        return {'typespec':'real'}
    # For arithmetic expressions, the type of any known operand decides.
    for op in ['+','-','*','/']:
        for e in [x.strip() for x in markoutercomma(expr,comma=op).split('@'+op+'@')]:
            if e in vars:
                return _ensure_exprdict(vars[e])
    t={}
    if determineexprtype_re_4.match(expr): # in parenthesis
        t=determineexprtype(expr[1:-1],vars,rules)
    else:
        m = determineexprtype_re_5.match(expr)
        if m:
            rn=m.group('name')
            t=determineexprtype(m.group('name'),vars,rules)
            if t and 'attrspec' in t:
                del t['attrspec']
            if not t:
                # Fall back to the implicit typing rule for the first letter.
                if rn[0] in rules:
                    return _ensure_exprdict(rules[rn[0]])
    if expr[0] in '\'"':
        return {'typespec':'character','charselector':{'*':'*'}}
    if not t:
        outmess('determineexprtype: could not determine expressions (%s) type.\n'%(repr(expr)))
    return t
######
def crack2fortrangen(block,tab='\n', as_interface=False):
    """Render a crack block (or a list of blocks) back into Fortran source.

    ``block`` is either a single block dictionary or a list of them.
    ``tab`` carries the current line prefix (a newline plus indentation);
    nested constructs get ``tab+tabchar``.  Returns the generated source
    as one string.
    """
    global skipfuncs, onlyfuncs
    setmesstext(block)
    ret=''
    if isinstance(block, list):
        # A list of blocks: render each one, filtering functions and
        # subroutines through the global skipfuncs/onlyfuncs lists.
        for g in block:
            if g and g['block'] in ['function','subroutine']:
                if g['name'] in skipfuncs:
                    continue
                if onlyfuncs and g['name'] not in onlyfuncs:
                    continue
            ret=ret+crack2fortrangen(g,tab,as_interface=as_interface)
        return ret
    prefix=''
    name=''
    args=''
    blocktype=block['block']
    # Program blocks are not rendered.
    if blocktype=='program': return ''
    argsl = []
    if 'name' in block:
        name=block['name']
    if 'args' in block:
        vars = block['vars']
        # Resolve argument names; intent(callback) arguments are emitted
        # via f2py enhancements, not in the argument list.
        for a in block['args']:
            a = expr2name(a, block, argsl)
            if not isintent_callback(vars[a]):
                argsl.append(a)
        if block['block']=='function' or argsl:
            args='(%s)'%','.join(argsl)
    f2pyenhancements = ''
    if 'f2pyenhancements' in block:
        for k in block['f2pyenhancements'].keys():
            f2pyenhancements = '%s%s%s %s'%(f2pyenhancements,tab+tabchar,k,block['f2pyenhancements'][k])
    intent_lst = block.get('intent',[])[:]
    if blocktype=='function' and 'callback' in intent_lst:
        # 'callback' intent belongs to arguments, not to the function itself.
        intent_lst.remove('callback')
    if intent_lst:
        f2pyenhancements = '%s%sintent(%s) %s'%\
                           (f2pyenhancements,tab+tabchar,
                            ','.join(intent_lst),name)
    use=''
    if 'use' in block:
        use=use2fortran(block['use'],tab+tabchar)
    common=''
    if 'common' in block:
        common=common2fortran(block['common'],tab+tabchar)
    if name=='unknown_interface': name=''
    result=''
    if 'result' in block:
        result=' result (%s)'%block['result']
        if block['result'] not in argsl:
            argsl.append(block['result'])
    #if 'prefix' in block:
    #    prefix=block['prefix']+' '
    # Render children first, then this block's variable declarations.
    body=crack2fortrangen(block['body'],tab+tabchar)
    vars=vars2fortran(block,block['vars'],argsl,tab+tabchar, as_interface=as_interface)
    mess=''
    if 'from' in block and not as_interface:
        mess='! in %s'%block['from']
    if 'entry' in block:
        entry_stmts = ''
        for k,i in block['entry'].items():
            entry_stmts = '%s%sentry %s(%s)' \
                          % (entry_stmts,tab+tabchar,k,','.join(i))
        body = body + entry_stmts
    if blocktype=='block data' and name=='_BLOCK_DATA_':
        # '_BLOCK_DATA_' is the internal placeholder for an anonymous
        # block data unit.
        name = ''
    ret='%s%s%s %s%s%s %s%s%s%s%s%s%send %s %s'%(tab,prefix,blocktype,name,args,result,mess,f2pyenhancements,use,vars,common,body,tab,blocktype,name)
    return ret
def common2fortran(common, tab=''):
    'Render the ``common`` mapping of a block as Fortran COMMON statements.'
    chunks = []
    for name in common.keys():
        members = ','.join(common[name])
        if name == '_BLNK_':
            # '_BLNK_' is the internal name of the blank (unnamed) common block.
            chunks.append('%scommon %s' % (tab, members))
        else:
            chunks.append('%scommon /%s/ %s' % (tab, name, members))
    return ''.join(chunks)
def use2fortran(use, tab=''):
    'Render the ``use`` mapping of a block as Fortran USE statements.'
    out = ''
    for modname in use.keys():
        out = '%s%suse %s,' % (out, tab, modname)
        spec = use[modname]
        if spec == {}:
            # Bare "use <module>" -- drop the dangling comma and move on.
            if out.endswith(','):
                out = out[:-1]
            continue
        if spec.get('only'):
            out = out + ' only:'
        if spec.get('map'):
            sep = ' '
            for local, target in spec['map'].items():
                if local == target:
                    out = '%s%s%s' % (out, sep, local)
                else:
                    # Renaming import: local=>target.
                    out = '%s%s%s=>%s' % (out, sep, local, target)
                sep = ','
    if out.endswith(','):
        out = out[:-1]
    return out
def true_intent_list(var):
    """Return the intents of *var* confirmed by an ``isintent_<name>`` check.

    For every string in ``var['intent']`` the module-level predicate
    ``isintent_<intent>`` is looked up and applied; intents without a
    matching predicate (or whose predicate rejects *var*) are dropped.

    FIX: the original built and eval()-ed the string
    'isintent_%s(var)', which is both slower and unsafe for arbitrary
    intent strings; a direct globals() lookup is equivalent (the old code
    caught NameError, this catches the corresponding KeyError).
    """
    lst = var['intent']
    ret = []
    for intent in lst:
        try:
            f = globals()['isintent_%s' % intent]
        except KeyError:
            continue
        if f(var):
            ret.append(intent)
    return ret
def vars2fortran(block,vars,args,tab='', as_interface=False):
    """Render the variable declarations of *block* as Fortran source.

    *vars* maps names to their attribute dictionaries, *args* lists the
    block's argument names.  Returns one string; each declaration is
    prefixed with *tab*.

    TODO:
    public sub
    ...
    """
    setmesstext(block)
    ret=''
    nout=[]
    # Build the ordered name list: arguments first, then common-block
    # members, then recorded names, then (for full sources, not
    # interfaces) everything else.
    for a in args:
        if a in block['vars']:
            nout.append(a)
    if 'commonvars' in block:
        for a in block['commonvars']:
            if a in vars:
                if a not in nout:
                    nout.append(a)
            else:
                errmess('vars2fortran: Confused?!: "%s" is not defined in vars.\n'%a)
    if 'varnames' in block:
        nout.extend(block['varnames'])
    if not as_interface:
        for a in vars.keys():
            if a not in nout:
                nout.append(a)
    for a in nout:
        if 'depend' in vars[a]:
            # Warn about mutual depend() attributes -- f2py cannot order them.
            for d in vars[a]['depend']:
                if d in vars and 'depend' in vars[d] and a in vars[d]['depend']:
                    errmess('vars2fortran: Warning: cross-dependence between variables "%s" and "%s"\n'%(a,d))
        if 'externals' in block and a in block['externals']:
            # External procedures get external/optional/intent(callback)
            # statements; a typespec is only emitted for external functions
            # defined in this block's body.
            if isintent_callback(vars[a]):
                ret='%s%sintent(callback) %s'%(ret,tab,a)
            ret='%s%sexternal %s'%(ret,tab,a)
            if isoptional(vars[a]):
                ret='%s%soptional %s'%(ret,tab,a)
            if a in vars and 'typespec' not in vars[a]:
                continue
            cont=1
            for b in block['body']:
                if a==b['name'] and b['block']=='function':
                    cont=0;break
            if cont:
                continue
        if a not in vars:
            show(vars)
            outmess('vars2fortran: No definition for argument "%s".\n'%a)
            continue
        if a==block['name'] and not block['block']=='function':
            # The block's own name is only declared for functions (result).
            continue
        if 'typespec' not in vars[a]:
            if 'attrspec' in vars[a] and 'external' in vars[a]['attrspec']:
                if a in args:
                    ret='%s%sexternal %s'%(ret,tab,a)
                continue
            show(vars[a])
            outmess('vars2fortran: No typespec for argument "%s".\n'%a)
            continue
        vardef=vars[a]['typespec']
        if vardef=='type' and 'typename' in vars[a]:
            vardef='%s(%s)'%(vardef,vars[a]['typename'])
        selector={}
        if 'kindselector' in vars[a]:
            selector=vars[a]['kindselector']
        elif 'charselector' in vars[a]:
            selector=vars[a]['charselector']
        if '*' in selector:
            # Star form: character*N, or character*(*) / character*(:).
            if selector['*'] in ['*',':']:
                vardef='%s*(%s)'%(vardef,selector['*'])
            else:
                vardef='%s*%s'%(vardef,selector['*'])
        else:
            # Keyword form: (len=..., kind=...) or (kind=...).
            if 'len' in selector:
                vardef='%s(len=%s'%(vardef,selector['len'])
                if 'kind' in selector:
                    vardef='%s,kind=%s)'%(vardef,selector['kind'])
                else:
                    vardef='%s)'%(vardef)
            elif 'kind' in selector:
                vardef='%s(kind=%s)'%(vardef,selector['kind'])
        # `c` is the separator in front of the next attribute clause:
        # a space before the first clause, a comma afterwards.
        c=' '
        if 'attrspec' in vars[a]:
            attr=[]
            for l in vars[a]['attrspec']:
                if l not in ['external']:
                    attr.append(l)
            if attr:
                vardef='%s, %s'%(vardef,','.join(attr))
                c=','
        if 'dimension' in vars[a]:
#            if not isintent_c(vars[a]):
#                vars[a]['dimension'].reverse()
            vardef='%s%sdimension(%s)'%(vardef,c,','.join(vars[a]['dimension']))
            c=','
        if 'intent' in vars[a]:
            lst = true_intent_list(vars[a])
            if lst:
                vardef='%s%sintent(%s)'%(vardef,c,','.join(lst))
                c=','
        if 'check' in vars[a]:
            vardef='%s%scheck(%s)'%(vardef,c,','.join(vars[a]['check']))
            c=','
        if 'depend' in vars[a]:
            vardef='%s%sdepend(%s)'%(vardef,c,','.join(vars[a]['depend']))
            c=','
        if '=' in vars[a]:
            v = vars[a]['=']
            if vars[a]['typespec'] in ['complex','double complex']:
                # Try to normalize a complex initializer to '(re,im)';
                # on failure keep the original text verbatim.
                try:
                    v = eval(v)
                    v = '(%s,%s)' % (v.real,v.imag)
                except:
                    pass
            vardef='%s :: %s=%s'%(vardef,a,v)
        else:
            vardef='%s :: %s'%(vardef,a)
        ret='%s%s%s'%(ret,tab,vardef)
    return ret
######
def crackfortran(files):
    """Crack the Fortran source *files* into a list of block dictionaries.

    Reads and tokenizes the sources via readfortrancode/crackline, then
    runs the two post-processing stages.  The module-global
    ``usermodules`` list is reset and collected as a side effect of
    postcrack; the result is usermodules followed by the cracked blocks.
    """
    global usermodules
    outmess('Reading fortran codes...\n',0)
    readfortrancode(files,crackline)
    outmess('Post-processing...\n',0)
    usermodules=[]
    # grouplist[0] holds the top-level group built up by crackline.
    postlist=postcrack(grouplist[0])
    outmess('Post-processing (stage 2)...\n',0)
    postlist=postcrack2(postlist)
    return usermodules+postlist
def crack2fortran(block):
    'Render *block* as a complete, commented f90 signature file string.'
    global f2py_version
    body = crack2fortrangen(block) + '\n'
    head = ('! -*- f90 -*-\n'
            '! Note: the context of this file is case sensitive.\n')
    tail = ('\n'
            '! This file was auto-generated with f2py (version:%s).\n'
            '! See http://cens.ioc.ee/projects/f2py2e/\n') % (f2py_version,)
    return head + body + tail
if __name__ == "__main__":
files=[]
funcs=[]
f=1;f2=0;f3=0
showblocklist=0
for l in sys.argv[1:]:
if l=='': pass
elif l[0]==':':
f=0
elif l=='-quiet':
quiet=1
verbose=0
elif l=='-verbose':
verbose=2
quiet=0
elif l=='-fix':
if strictf77:
outmess('Use option -f90 before -fix if Fortran 90 code is in fix form.\n',0)
skipemptyends=1
sourcecodeform='fix'
elif l=='-skipemptyends':
skipemptyends=1
elif l=='--ignore-contains':
ignorecontains=1
elif l=='-f77':
strictf77=1
sourcecodeform='fix'
elif l=='-f90':
strictf77=0
sourcecodeform='free'
skipemptyends=1
elif l=='-h':
f2=1
elif l=='-show':
showblocklist=1
elif l=='-m':
f3=1
elif l[0]=='-':
errmess('Unknown option %s\n'%`l`)
elif f2:
f2=0
pyffilename=l
elif f3:
f3=0
f77modulename=l
elif f:
try:
open(l).close()
files.append(l)
except IOError,detail:
errmess('IOError: %s\n'%str(detail))
else:
funcs.append(l)
if not strictf77 and f77modulename and not skipemptyends:
outmess("""\
Warning: You have specifyied module name for non Fortran 77 code
that should not need one (expect if you are scanning F90 code
for non module blocks but then you should use flag -skipemptyends
and also be sure that the files do not contain programs without program statement).
""",0)
postlist=crackfortran(files,funcs)
if pyffilename:
outmess('Writing fortran code to file %s\n'%`pyffilename`,0)
pyf=crack2fortran(postlist)
f=open(pyffilename,'w')
f.write(pyf)
f.close()
if showblocklist:
show(postlist)
| bsd-3-clause | -1,710,996,368,748,892,400 | 40.398515 | 246 | 0.50313 | false | 3.688912 | false | false | false | 0.024685 |
ahmetcemturan/SFACT | fabmetheus_utilities/geometry/geometry_utilities/boolean_geometry.py | 8 | 8191 | """
This page is in the table of contents.
The xml.py script is an import translator plugin to get a carving from an Art of Illusion xml file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of an xml file and returns the carving.
An xml file can be exported from Art of Illusion by going to the "File" menu, then going into the "Export" menu item, then picking the XML choice. This will bring up the XML file chooser window, choose a place to save the file then click "OK". Leave the "compressFile" checkbox unchecked. All the objects from the scene will be exported, this plugin will ignore the light and camera. If you want to fabricate more than one object at a time, you can have multiple objects in the Art of Illusion scene and they will all be carved, then fabricated together.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_utilities.evaluate_elements import setting
from fabmetheus_utilities.geometry.geometry_utilities import boolean_solid
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.solids import triangle_mesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import settings
from fabmetheus_utilities import xml_simple_writer
import math
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getEmptyZLoops(archivableObjects, importRadius, shouldPrintWarning, z, zoneArrangement):
    'Get loops at empty z level.'
    sliceZ = zoneArrangement.getEmptyZ(z)
    visible = evaluate.getVisibleObjects(archivableObjects)
    loopsPerObject = boolean_solid.getVisibleObjectLoopsList(importRadius, visible, sliceZ)
    loops = euclidean.getConcatenatedList(loopsPerObject)
    if not euclidean.isLoopListIntersecting(loops):
        return loops
    # Self-intersecting slice: fall back to the boolean union of the
    # per-object loop lists and (optionally) warn the user.
    loops = boolean_solid.getLoopsUnion(importRadius, loopsPerObject)
    if shouldPrintWarning:
        print('Warning, the triangle mesh slice intersects itself in getExtruderPaths in boolean_geometry.')
        print('Something will still be printed, but there is no guarantee that it will be the correct shape.')
        print('Once the gcode is saved, you should check over the layer with a z of:')
        print(z)
    return loops
def getLoopLayers(archivableObjects, importRadius, layerHeight, maximumZ, shouldPrintWarning, z, zoneArrangement):
    'Get loop layers.'
    layers = []
    currentZ = z
    while currentZ <= maximumZ:
        loopLayer = triangle_mesh.getLoopLayerAppend(layers, currentZ)
        # NOTE(review): warnings are hard-coded on here; the
        # shouldPrintWarning parameter is ignored -- confirm intended.
        loopLayer.loops = getEmptyZLoops(archivableObjects, importRadius, True, currentZ, zoneArrangement)
        currentZ += layerHeight
    return layers
def getMinimumZ(geometryObject):
    """Get the minimum of the minimum z of the archivableObjects and the object.

    Builds a temporary BooleanGeometry around the object's archivable
    children and combines its minimum z with the object's own; either
    side may be None when it has no vertexes.

    FIX: None checks use ``is None`` instead of ``== None`` (identity is
    the correct and PEP 8-mandated comparison for None).
    """
    booleanGeometry = BooleanGeometry()
    booleanGeometry.archivableObjects = geometryObject.archivableObjects
    booleanGeometry.importRadius = setting.getImportRadius(geometryObject.elementNode)
    booleanGeometry.layerHeight = setting.getLayerHeight(geometryObject.elementNode)
    archivableMinimumZ = booleanGeometry.getMinimumZ()
    geometryMinimumZ = geometryObject.getMinimumZ()
    if archivableMinimumZ is None:
        return geometryMinimumZ
    if geometryMinimumZ is None:
        return archivableMinimumZ
    return min(archivableMinimumZ, geometryMinimumZ)
class BooleanGeometry:
    'A boolean geometry scene.'
    def __init__(self):
        'Add empty lists.'
        self.archivableObjects = []   # objects to be carved
        self.belowLoops = []
        self.importRadius = 0.6       # radius used by the boolean loop operations
        self.layerHeight = 0.4        # slice thickness
        self.loopLayers = []          # filled by getCarveBoundaryLayers
    def __repr__(self):
        'Get the string representation of this carving.'
        elementNode = None
        if len(self.archivableObjects) > 0:
            elementNode = self.archivableObjects[0].elementNode
        output = xml_simple_writer.getBeginGeometryXMLOutput(elementNode)
        self.addXML( 1, output )
        return xml_simple_writer.getEndGeometryXMLString(output)
    def addXML(self, depth, output):
        'Add xml for this object.'
        xml_simple_writer.addXMLFromObjects( depth, self.archivableObjects, output )
    def getCarveBoundaryLayers(self):
        'Get the boundary layers.'
        # NOTE(review): '== None' should be 'is None' -- works for the
        # float/None values returned here, but identity is the safe form.
        if self.getMinimumZ() == None:
            return []
        # Slice at the middle of each layer.
        z = self.minimumZ + 0.5 * self.layerHeight
        self.loopLayers = getLoopLayers(self.archivableObjects, self.importRadius, self.layerHeight, self.maximumZ, True, z, self.zoneArrangement)
        # Recompute the bounding corners from the generated loop points.
        self.cornerMaximum = Vector3(-912345678.0, -912345678.0, -912345678.0)
        self.cornerMinimum = Vector3(912345678.0, 912345678.0, 912345678.0)
        for loopLayer in self.loopLayers:
            for loop in loopLayer.loops:
                for point in loop:
                    pointVector3 = Vector3(point.real, point.imag, loopLayer.z)
                    self.cornerMaximum.maximize(pointVector3)
                    self.cornerMinimum.minimize(pointVector3)
        self.cornerMaximum.z += self.halfHeight
        self.cornerMinimum.z -= self.halfHeight
        # Trim trailing empty layers from the top of the stack.
        for loopLayerIndex in xrange(len(self.loopLayers) -1, -1, -1):
            loopLayer = self.loopLayers[loopLayerIndex]
            if len(loopLayer.loops) > 0:
                return self.loopLayers[: loopLayerIndex + 1]
        return []
    def getCarveCornerMaximum(self):
        'Get the corner maximum of the vertexes.'
        return self.cornerMaximum
    def getCarveCornerMinimum(self):
        'Get the corner minimum of the vertexes.'
        return self.cornerMinimum
    def getCarveLayerHeight(self):
        'Get the layer height.'
        return self.layerHeight
    def getFabmetheusXML(self):
        'Return the fabmetheus XML.'
        if len(self.archivableObjects) > 0:
            return self.archivableObjects[0].elementNode.getOwnerDocument().getOriginalRoot()
        return None
    def getInterpretationSuffix(self):
        'Return the suffix for a boolean carving.'
        return 'xml'
    def getMatrix4X4(self):
        'Get the matrix4X4.'
        return None
    def getMatrixChainTetragrid(self):
        'Get the matrix chain tetragrid.'
        return None
    def getMinimumZ(self):
        'Get the minimum z.'
        # Collect all transformed vertexes of the visible objects; without
        # vertexes there is nothing to carve.
        vertexes = []
        for visibleObject in evaluate.getVisibleObjects(self.archivableObjects):
            vertexes += visibleObject.getTransformedVertexes()
        if len(vertexes) < 1:
            return None
        self.maximumZ = -912345678.0
        self.minimumZ = 912345678.0
        for vertex in vertexes:
            self.maximumZ = max(self.maximumZ, vertex.z)
            self.minimumZ = min(self.minimumZ, vertex.z)
        self.zoneArrangement = triangle_mesh.ZoneArrangement(self.layerHeight, vertexes)
        self.halfHeight = 0.5 * self.layerHeight
        # Refine minimumZ to the lowest z that actually produces loops.
        self.setActualMinimumZ()
        return self.minimumZ
    def getNumberOfEmptyZLoops(self, z):
        'Get number of empty z loops.'
        return len(getEmptyZLoops(self.archivableObjects, self.importRadius, False, z, self.zoneArrangement))
    def setActualMinimumZ(self):
        'Get the actual minimum z at the lowest rotated boundary layer.'
        halfHeightOverMyriad = 0.0001 * self.halfHeight
        # Scan upward one layer at a time until a z with loops is found,
        # then binary-search the exact boundary within half a layer.
        while self.minimumZ < self.maximumZ:
            if self.getNumberOfEmptyZLoops(self.minimumZ + halfHeightOverMyriad) > 0:
                if self.getNumberOfEmptyZLoops(self.minimumZ - halfHeightOverMyriad) < 1:
                    return
                increment = -self.halfHeight
                while abs(increment) > halfHeightOverMyriad:
                    self.minimumZ += increment
                    increment = 0.5 * abs(increment)
                    if self.getNumberOfEmptyZLoops(self.minimumZ) > 0:
                        increment = -increment
                # Round to the precision implied by halfHeightOverMyriad.
                self.minimumZ = round(self.minimumZ, -int(round(math.log10(halfHeightOverMyriad) + 1.5)))
                return
            self.minimumZ += self.layerHeight
    def setCarveImportRadius( self, importRadius ):
        'Set the import radius.'
        self.importRadius = importRadius
    def setCarveIsCorrectMesh( self, isCorrectMesh ):
        'Set the is correct mesh flag.'
        self.isCorrectMesh = isCorrectMesh
    def setCarveLayerHeight( self, layerHeight ):
        'Set the layer height.'
        self.layerHeight = layerHeight
mne-tools/mne-tools.github.io | dev/_downloads/198a4f1929f445bf24fad1876c4a9b13/psf_ctf_vertices_lcmv.py | 5 | 5353 | # -*- coding: utf-8 -*-
"""
=================================================
Compute cross-talk functions for LCMV beamformers
=================================================
Visualise cross-talk functions at one vertex for LCMV beamformers computed
with different data covariance matrices, which affects their cross-talk
functions.
"""
# Author: Olaf Hauk <olaf.hauk@mrc-cbu.cam.ac.uk>
#
# License: BSD (3-clause)
import mne
from mne.datasets import sample
from mne.beamformer import make_lcmv, make_lcmv_resolution_matrix
from mne.minimum_norm import get_cross_talk
print(__doc__)

data_path = sample.data_path()
subjects_dir = data_path + '/subjects/'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
fname_evo = data_path + '/MEG/sample/sample_audvis-ave.fif'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'

# Read raw data
raw = mne.io.read_raw_fif(raw_fname)

# only pick good EEG/MEG sensors
raw.info['bads'] += ['EEG 053'] # bads + 1 more
picks = mne.pick_types(raw.info, meg=True, eeg=True, exclude='bads')

# Find events; only the visual conditions are analysed here.
events = mne.find_events(raw)
# event_id = {'aud/l': 1, 'aud/r': 2, 'vis/l': 3, 'vis/r': 4}
event_id = {'vis/l': 3, 'vis/r': 4}

tmin, tmax = -.2, .25 # epoch duration
epochs = mne.Epochs(raw, events, event_id=event_id, tmin=tmin, tmax=tmax,
                    picks=picks, baseline=(-.2, 0.), preload=True)
del raw

# covariance matrix for pre-stimulus interval
tmin, tmax = -.2, 0.
cov_pre = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax,
                                 method='empirical')

# covariance matrix for post-stimulus interval (around main evoked responses)
tmin, tmax = 0.05, .25
cov_post = mne.compute_covariance(epochs, tmin=tmin, tmax=tmax,
                                  method='empirical')
info = epochs.info
del epochs

# read forward solution
forward = mne.read_forward_solution(fname_fwd)
# use forward operator with fixed source orientations
mne.convert_forward_solution(forward, surf_ori=True,
                             force_fixed=True, copy=False)

# read noise covariance matrix
noise_cov = mne.read_cov(fname_cov)

# regularize noise covariance (we used 'empirical' above)
noise_cov = mne.cov.regularize(noise_cov, info, mag=0.1, grad=0.1,
                               eeg=0.1, rank='info')

##############################################################################
# Compute LCMV filters with different data covariance matrices
# ------------------------------------------------------------
# The two beamformers differ only in the data covariance window.

# compute LCMV beamformer filters for pre-stimulus interval
filters_pre = make_lcmv(info, forward, cov_pre, reg=0.05,
                        noise_cov=noise_cov,
                        pick_ori=None, rank=None,
                        weight_norm=None,
                        reduce_rank=False,
                        verbose=False)

# compute LCMV beamformer filters for post-stimulus interval
filters_post = make_lcmv(info, forward, cov_post, reg=0.05,
                         noise_cov=noise_cov,
                         pick_ori=None, rank=None,
                         weight_norm=None,
                         reduce_rank=False,
                         verbose=False)

##############################################################################
# Compute resolution matrices for the two LCMV beamformers
# --------------------------------------------------------

# compute cross-talk functions (CTFs) for one target vertex
sources = [3000]
verttrue = [forward['src'][0]['vertno'][sources[0]]] # pick one vertex

rm_pre = make_lcmv_resolution_matrix(filters_pre, forward, info)

stc_pre = get_cross_talk(rm_pre, forward['src'], sources, norm=True)
del rm_pre

##############################################################################

rm_post = make_lcmv_resolution_matrix(filters_post, forward, info)

stc_post = get_cross_talk(rm_post, forward['src'], sources, norm=True)
del rm_post

##############################################################################
# Visualize
# ---------
# Pre:

brain_pre = stc_pre.plot('sample', 'inflated', 'lh', subjects_dir=subjects_dir,
                         figure=1, clim=dict(kind='value', lims=(0, .2, .4)))

brain_pre.add_text(0.1, 0.9, 'LCMV beamformer with pre-stimulus\ndata '
                   'covariance matrix', 'title', font_size=16)

# mark true source location for CTFs
brain_pre.add_foci(verttrue, coords_as_verts=True, scale_factor=1., hemi='lh',
                   color='green')

###############################################################################
# Post:

brain_post = stc_post.plot('sample', 'inflated', 'lh',
                           subjects_dir=subjects_dir,
                           figure=2, clim=dict(kind='value', lims=(0, .2, .4)))

brain_post.add_text(0.1, 0.9, 'LCMV beamformer with post-stimulus\ndata '
                    'covariance matrix', 'title', font_size=16)

brain_post.add_foci(verttrue, coords_as_verts=True, scale_factor=1.,
                    hemi='lh', color='green')

###############################################################################
# The pre-stimulus beamformer's CTF has lower values in parietal regions
# (suppressed alpha activity?) but larger values in occipital regions (less
# suppression of visual activity?).
| bsd-3-clause | 1,726,596,400,422,397,400 | 37.510791 | 79 | 0.555203 | false | 3.573431 | false | false | false | 0 |
abinashk-inf/AstroBox | src/ext/makerbot_driver/GcodeAssembler.py | 6 | 7063 | from __future__ import absolute_import
import json
import makerbot_driver
"""
A machine profile object that holds all values for a specific profile.
"""
import json
import os
import re
import logging
# Built-in gcode "recipes".  Each recipe names which routine from the
# machine profile's print_start_sequence / print_end_sequence templates
# to use for a given concern (platform heating/cooling, tool heating),
# plus the template variables (temperatures) the gcode processor
# substitutes.  'dualstrusion' is overlaid on a material recipe when
# both tools are active.
GcodeRecipes = {
    "PLA" : {
        "print_start_sequence" : {
            "heat_platform" : "no_heat"
        },
        "print_end_sequence" : {
            "cool_platform" : "no_cool"
        },
        "variables" : {
            "TOOL_0_TEMP" : 230,
            "TOOL_1_TEMP" : 230
        }
    },
    "ABS" : {
        "print_start_sequence" : {
            "heat_platform" : "heat_platform"
        },
        "print_end_sequence" : {
            "cool_platform" : "cool_platform"
        },
        "variables" : {
            "TOOL_0_TEMP" : 230,
            "TOOL_1_TEMP" : 230,
            "PLATFORM_TEMP" : 110
        }
    },
    "dualstrusion": {
        "print_start_sequence" : {
            "heat_tools" : "dualstrusion"
        },
        "print_end_sequence" : {
            "cool_tools" : "dualstrusion"
        },
        "variables" : {}
    }
}
class GcodeAssembler(object):
    """
    An assembler that builds start and end gcodes.
    In makerbot_driver/profiles/recipes.json there are
    several recipes defined, each with a set of routines.
    """

    def __init__(self, machine_profile, profiledir=None):
        # NOTE(review): profiledir is accepted but never used here --
        # presumably kept for API compatibility; confirm against callers.
        self.machine_profile = machine_profile
        # Order in which start-sequence routines are assembled.
        self.start_order = [
            'begin_print',
            'homing',
            'start_position',
            'heat_platform',
            'heat_tools',
            'end_start_sequence',
        ]
        # Order in which end-sequence routines are assembled.
        self.end_order = [
            'end_position',
            'cool_platform',
            'cool_tools',
            'end_print',
        ]
        self.recipes = GcodeRecipes

    def assemble_recipe(self,
                        material='PLA',
                        tool_0=True,
                        tool_1=False,
                        begin_print='replicator_begin',
                        homing='replicator_homing',
                        start_position='replicator_start_position',
                        end_start_sequence='replicator_end_start_sequence',
                        end_position='replicator_end_position',
                        end_print='replicator_end',
                        heat_platform_override=False,
                        no_heat_platform_override=False,
                        ):
        """
        The recipe assembler. Has several built in
        defaults a user could use to create a generic
        sequence recipe. If both tool_0 and tool_1 are
        set to true, will assume it should output in
        dualstrusion mode.

        @return dict start_recipe: The recipe used to
            build the print start sequence.
        @return dict end_recipe: The recipe used to
            build the print end sequence.
        @return dict variables: The default variables
            used by the gcode parser.
        """
        start_recipe = {}
        end_recipe = {}
        variables = {}
        #Check for dualstrusion
        if tool_0 and tool_1:
            # Both tools active: overlay the dualstrusion recipe.
            dual_start_recipe, dual_end_recipe, dual_variables = self.get_recipes_and_variables('dualstrusion')
            start_recipe.update(dual_start_recipe)
            end_recipe.update(dual_end_recipe)
            variables.update(dual_variables)
        elif tool_0:
            #Update start routine
            start_recipe.update({'heat_tools': 'heat_0'})
            #Update end routine
            end_recipe.update({'cool_tools': 'cool_0'})
        elif tool_1:
            #Update start routine
            start_recipe.update({'heat_tools': 'heat_1'})
            #Update end routine
            end_recipe.update({'cool_tools': 'cool_1'})
        #Add material values to the return template values
        mat_start_recipe, mat_end_recipe, mat_variables = self.get_recipes_and_variables(material)
        start_recipe.update(mat_start_recipe)
        end_recipe.update(mat_end_recipe)
        variables.update(mat_variables)
        # Machine-specific routines chosen by the caller (or the
        # replicator defaults above).
        start_recipe.update({
            'begin_print': begin_print,
            'homing': homing,
            'start_position': start_position,
            'end_start_sequence': end_start_sequence,
        })
        end_recipe.update({
            'end_position': end_position,
            'end_print': end_print
        })
        # Explicit overrides win over the material's platform choice.
        if heat_platform_override:
            start_recipe.update({'heat_platform': 'heat_platform'})
            end_recipe.update({'cool_platform': 'cool_platform'})
        if no_heat_platform_override:
            start_recipe.update({'heat_platform': None})
            end_recipe.update({'cool_platform': None})
        return start_recipe, end_recipe, variables

    def assemble_start_sequence(self, recipe):
        """
        Given a start recipe, assembles the correct sequence

        @param recipe: The recipe used to create the sequence
        @return list gcodes: Sequence of gcodes derived from the recipe
        """
        order = self.start_order
        template_name = 'print_start_sequence'
        gcodes = self.assemble_sequence_from_recipe(
            recipe, template_name, order)
        return gcodes

    def assemble_end_sequence(self, recipe):
        """
        Given an end recipe, assembles the correct sequence

        @param recipe: The recipe used to create the sequence
        @return list gcodes: Sequence of gcodes derived from the recipe
        """
        order = self.end_order
        template_name = 'print_end_sequence'
        gcodes = self.assemble_sequence_from_recipe(
            recipe, template_name, order)
        return gcodes

    def assemble_sequence_from_recipe(self, recipe, template_name, order):
        """
        Given a recipe, template_name and ordering creates the correct
        sequence.

        @param recipe: The recipe used to create the sequence
        @param template_name: The name of the template we want to use (start/end)
        @param order: The correct ordering of routines
        @return list gcodes: Sequence of gcodes derived from the recipe.
        """
        gcodes = []
        template = self.machine_profile.values[template_name]
        for routine in order:
            # A recipe entry of None means "skip this routine entirely".
            if recipe[routine] is not None:
                gcodes.extend(template[routine][recipe[routine]])
        return gcodes

    def get_recipes_and_variables(self, key):
        """
        Given a recipe (i.e. PLA, ABS, dualstrusion), gets its start
        routines, end routines and variables.

        @param key: Name of the recipe we want to access
        @return dict start_routines: The start routines associated with this key
        @return dict end_routines: The end routines associated with this key
        @return dict variables: The variables associated with this key
        """
        if not key in self.recipes:
            # Raises the exception class itself (no message/args).
            raise makerbot_driver.RecipeNotFoundError
        values = self.recipes[key]
        start_routines = values['print_start_sequence']
        end_routines = values['print_end_sequence']
        variables = values['variables']
        return start_routines, end_routines, variables
| agpl-3.0 | -506,478,922,230,023,700 | 32.794258 | 111 | 0.576242 | false | 4.201666 | false | false | false | 0.005097 |
ntt-sic/neutron | neutron/tests/unit/ml2/drivers/mechanism_test.py | 9 | 4820 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
class TestMechanismDriver(api.MechanismDriver):
    """Mechanism driver that asserts the ML2 driver API contract.

    Every pre/postcommit hook validates the context object it receives;
    bind_port additionally exercises the port-binding API for two
    well-known test host names.
    """

    def initialize(self):
        # Nothing to set up for this test driver.
        pass

    def _check_network_context(self, context, original_expected):
        # A network context must expose a populated ``current`` dict and
        # the network's bound segments.
        assert isinstance(context, api.NetworkContext)
        assert isinstance(context.current, dict)
        assert context.current['id'] is not None
        if original_expected:
            # Updates carry the pre-change state under a matching id.
            assert isinstance(context.original, dict)
            assert context.current['id'] == context.original['id']
        else:
            assert not context.original
        assert context.network_segments

    def create_network_precommit(self, context):
        self._check_network_context(context, False)

    def create_network_postcommit(self, context):
        self._check_network_context(context, False)

    def update_network_precommit(self, context):
        self._check_network_context(context, True)

    def update_network_postcommit(self, context):
        self._check_network_context(context, True)

    def delete_network_precommit(self, context):
        self._check_network_context(context, False)

    def delete_network_postcommit(self, context):
        self._check_network_context(context, False)

    def _check_subnet_context(self, context, original_expected):
        # Same contract as networks, minus the segment check.
        assert isinstance(context, api.SubnetContext)
        assert isinstance(context.current, dict)
        assert context.current['id'] is not None
        if original_expected:
            assert isinstance(context.original, dict)
            assert context.current['id'] == context.original['id']
        else:
            assert not context.original

    def create_subnet_precommit(self, context):
        self._check_subnet_context(context, False)

    def create_subnet_postcommit(self, context):
        self._check_subnet_context(context, False)

    def update_subnet_precommit(self, context):
        self._check_subnet_context(context, True)

    def update_subnet_postcommit(self, context):
        self._check_subnet_context(context, True)

    def delete_subnet_precommit(self, context):
        self._check_subnet_context(context, False)

    def delete_subnet_postcommit(self, context):
        self._check_subnet_context(context, False)

    def _check_port_context(self, context, original_expected):
        assert isinstance(context, api.PortContext)
        assert isinstance(context.current, dict)
        assert context.current['id'] is not None
        if original_expected:
            assert isinstance(context.original, dict)
            assert context.current['id'] == context.original['id']
        else:
            assert not context.original
        # Every port belongs to a valid network context.
        net_ctx = context.network
        assert isinstance(net_ctx, api.NetworkContext)
        self._check_network_context(net_ctx, False)

    def create_port_precommit(self, context):
        self._check_port_context(context, False)

    def create_port_postcommit(self, context):
        self._check_port_context(context, False)

    def update_port_precommit(self, context):
        self._check_port_context(context, True)

    def update_port_postcommit(self, context):
        self._check_port_context(context, True)

    def delete_port_precommit(self, context):
        self._check_port_context(context, False)

    def delete_port_postcommit(self, context):
        self._check_port_context(context, False)

    def bind_port(self, context):
        self._check_port_context(context, False)
        host = context.current.get(portbindings.HOST_ID)
        segment = context.network.network_segments[0][api.ID]
        if host == "host-ovs-no_filter":
            context.set_binding(segment, portbindings.VIF_TYPE_OVS, False)
        elif host == "host-bridge-filter":
            context.set_binding(segment, portbindings.VIF_TYPE_BRIDGE, True)

    def validate_port_binding(self, context):
        self._check_port_context(context, False)
        return True

    def unbind_port(self, context):
        self._check_port_context(context, False)
| apache-2.0 | -6,802,861,688,705,456,000 | 36.65625 | 78 | 0.677801 | false | 4.043624 | false | false | false | 0.004149 |
dmwu/sparrow | deploy/third_party/boto-2.1.1/boto/vpc/customergateway.py | 35 | 1961 | # Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a Customer Gateway
"""
from boto.ec2.ec2object import TaggedEC2Object
class CustomerGateway(TaggedEC2Object):
    """An EC2 VPC customer gateway, populated from API XML responses."""

    # XML element name -> instance attribute name for endElement().
    _ELEMENT_TO_ATTR = {
        'customerGatewayId': 'id',
        'ipAddress': 'ip_address',
        'type': 'type',
        'state': 'state',
        'bgpAsn': 'bgp_asn',
    }

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None
        self.type = None
        self.state = None
        self.ip_address = None
        self.bgp_asn = None

    def __repr__(self):
        return 'CustomerGateway:%s' % self.id

    def endElement(self, name, value, connection):
        # Known elements map onto snake_case attributes; anything else is
        # stored verbatim under the element's own name.
        setattr(self, self._ELEMENT_TO_ATTR.get(name, name), value)
| apache-2.0 | -5,308,154,188,106,075,000 | 35.314815 | 74 | 0.676186 | false | 4.208155 | false | false | false | 0.00204 |
kovacsbalu/ansible-modules-extras | cloud/amazon/ec2_elb_facts.py | 34 | 5261 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_elb_facts
short_description: Gather facts about EC2 Elastic Load Balancers in AWS
description:
- Gather facts about EC2 Elastic Load Balancers in AWS
version_added: "2.0"
author: "Michael Schultz (github.com/mjschultz)"
options:
names:
description:
- List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
required: false
default: null
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Output format tries to match ec2_elb_lb module input parameters
# Gather facts about all ELBs
- action:
module: ec2_elb_facts
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: elb_facts.elbs
# Gather facts about a particular ELB
- action:
module: ec2_elb_facts
names: frontend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ elb_facts.elbs.0.dns_name }}"
# Gather facts about a set of ELBs
- action:
module: ec2_elb_facts
names:
- frontend-prod-elb
- backend-prod-elb
register: elb_facts
- action:
module: debug
msg: "{{ item.dns_name }}"
with_items: elb_facts.elbs
'''
import xml.etree.ElementTree as ET
# boto is an optional dependency at import time; main() fails fast with a
# clear message when it is missing.
try:
    import boto.ec2.elb
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
def get_error_message(xml_string):
    """Return the first ``<Message>`` element text from an AWS error XML
    body, or None when the document contains no such element.
    """
    messages = ET.fromstring(xml_string).findall('.//Message')
    return messages[0].text if messages else None
def get_elb_listeners(listeners):
    """Convert boto listener tuples into ec2_elb_lb-style dicts.

    Each tuple is (lb_port, instance_port, protocol[, instance_protocol,
    ssl_certificate_id]); a truthy certificate id at index 4 is included
    as ``ssl_certificate_id``.
    """
    result = []
    for entry in listeners:
        item = {
            'load_balancer_port': entry[0],
            'instance_port': entry[1],
            'protocol': entry[2],
        }
        # Index 4 is optional and may be empty.
        if len(entry) > 4 and entry[4]:
            item['ssl_certificate_id'] = entry[4]
        result.append(item)
    return result
def get_health_check(health_check):
    """Convert a boto HealthCheck object into an ec2_elb_lb-style dict.

    The boto target string looks like ``PROTO:port`` or
    ``PROTO:port/path``, e.g. ``HTTP:80/ping``. Previously a path with a
    second slash (``HTTP:80/ping/deep``) made the 2-way split raise
    ValueError, fall through to the no-path branch, and crash on
    ``int('80/ping/deep')``; partition() splits on the first slash only.
    """
    protocol, port_path = health_check.target.split(':')
    # Split off the optional path at the FIRST '/'; 'sep' tells us whether
    # a path component (even an empty one, "80/") was present.
    port, sep, path = port_path.partition('/')
    health_check_dict = {
        'ping_protocol': protocol.lower(),
        'ping_port': int(port),
        'response_timeout': health_check.timeout,
        'interval': health_check.interval,
        'unhealthy_threshold': health_check.unhealthy_threshold,
        'healthy_threshold': health_check.healthy_threshold,
    }
    if sep:
        # Restore the leading slash stripped by partition().
        health_check_dict['ping_path'] = '/' + path
    return health_check_dict
def get_elb_info(elb):
    """Map a boto load balancer object onto the fact dict returned to the
    user (shaped to match the ec2_elb_lb module's input parameters).
    """
    info = dict(
        name=elb.name,
        zones=elb.availability_zones,
        dns_name=elb.dns_name,
        instances=[inst.id for inst in elb.instances],
        listeners=get_elb_listeners(elb.listeners),
        scheme=elb.scheme,
        security_groups=elb.security_groups,
        health_check=get_health_check(elb.health_check),
        subnets=elb.subnets,
    )
    # Only VPC load balancers carry a vpc_id.
    if elb.vpc_id:
        info['vpc_id'] = elb.vpc_id
    return info
def list_elb(connection, module):
    """Look up the requested ELBs and exit the module with their facts.

    An empty/missing ``names`` parameter means "all load balancers".
    """
    elb_names = module.params.get("names") or None
    try:
        all_elbs = connection.get_all_load_balancers(elb_names)
    except BotoServerError as e:
        # fail_json() terminates the module run.
        module.fail_json(msg=get_error_message(e.args[2]))

    module.exit_json(elbs=[get_elb_info(elb) for elb in all_elbs])
def main():
    """Module entry point: validate prerequisites, connect to the region's
    ELB endpoint and emit the load balancer facts.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            names={'default': None, 'type': 'list'}
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if region:
        try:
            connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
        # NOTE(review): Python-2-only except syntax; this module predates py3.
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")
    list_elb(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 | 1,792,561,067,728,397,300 | 25.979487 | 136 | 0.647786 | false | 3.656011 | false | false | false | 0.001331 |
mmmaaaxxx77/Python-Django-AdminLTE2 | HypermediaDemo/apps/ripozo/CustomAdapter.py | 1 | 4002 | import json
from ripozo.adapters import AdapterBase
from ripozo.resources.constants import input_categories
from ripozo.resources.resource_base import create_url
from ripozo.utilities import titlize_endpoint
import six
# MIME type this adapter serves and advertises in its response headers.
_CONTENT_TYPE = 'application/json'
class CustomAdapter(AdapterBase):
    """Ripozo adapter that serializes resources to a JSON body with
    ``properties``, ``actions``, ``links`` and ``datas`` keys.
    """
    # Content types this adapter can be selected for.
    formats = ['json', _CONTENT_TYPE]
    # Headers added to every response produced through this adapter.
    extra_headers = {'Content-Type': _CONTENT_TYPE}

    @property
    def formatted_body(self):
        """Serialize the wrapped resource into the response body string."""
        # 204 No Content responses must carry an empty body.
        if self.status_code == 204:
            return ''
        links = self.generate_links()
        entities = self.get_entities()
        response = dict(properties=self.resource.properties, actions=self._actions,
                        links=links, datas=entities)
        return json.dumps(response)

    @property
    def _actions(self):
        """Build one action descriptor per endpoint of the resource."""
        actions = []
        for endpoint, options in six.iteritems(self.resource.endpoint_dictionary()):
            options = options[0]
            # Default to GET when the endpoint declares no methods.
            all_methods = options.get('methods', ('GET',))
            meth = all_methods[0] if all_methods else 'GET'
            base_route = options.get('route', self.resource.base_url)
            route = create_url(base_route, **self.resource.properties)
            route = self.combine_base_url_with_resource_url(route)
            fields = self.generate_fields_for_endpoint_funct(options.get('endpoint_func'))
            actn = dict(name=endpoint, title=titlize_endpoint(endpoint),
                        method=meth, href=route, fields=fields)
            actions.append(actn)
        return actions

    def generate_fields_for_endpoint_funct(self, endpoint_func):
        """Return the input-field descriptors declared on *endpoint_func*,
        or [] when it declares none. URL parameters are excluded because
        they are already encoded in the action's href.
        """
        return_fields = []
        fields_method = getattr(endpoint_func, 'fields', None)
        if not fields_method:
            return []
        fields = fields_method(self.resource.manager)
        for field in fields:
            if field.arg_type is input_categories.URL_PARAMS:
                continue
            field_dict = dict(name=field.name, type=field.field_type.__name__,
                              location=field.arg_type, required=field.required)
            return_fields.append(field_dict)
        return return_fields

    def generate_links(self):
        """Return the self link followed by the resource's linked resources."""
        href = self.combine_base_url_with_resource_url(self.resource.url)
        links = [dict(rel=['self'], href=href)]
        for link, link_name, embedded in self.resource.linked_resources:
            links.append(dict(rel=[link_name],
                              href=self.combine_base_url_with_resource_url(link.url)))
        return links

    def get_entities(self):
        """Flatten all related resources into a single list of entity dicts."""
        entities = []
        for resource, name, embedded in self.resource.related_resources:
            for ent in self.generate_entity(resource, name, embedded):
                entities.append(ent)
        return entities

    def generate_entity(self, resource, name, embedded):
        """Yield entity dicts for *resource*, recursing into lists.

        Resources missing any primary key are skipped. Embedded resources
        currently yield an empty dict (see the NOTE below).
        """
        if isinstance(resource, list):
            for res in resource:
                for ent in self.generate_entity(res, name, embedded):
                    yield ent
        else:
            if not resource.has_all_pks:
                # Without all primary keys no stable URL can be built.
                return
            ent = {}
            resource_url = self.combine_base_url_with_resource_url(resource.url)
            if not embedded:
                ent['href'] = resource_url
                ent['data'] = resource.properties
            else:
                pass
                # NOTE(review): embedded entities are deliberately left
                # empty; an earlier revision populated Siren-style
                # 'properties'/'links' keys here.
            yield ent

    @classmethod
    def format_exception(cls, exc):
        """Serialize *exc* into ``(json_body, content_type, status_code)``."""
        # Fall back to 500 when the exception carries no status_code.
        status_code = getattr(exc, 'status_code', 500)
        body = {'class': ['exception', exc.__class__.__name__],
                'actions': [], 'entities': [], 'links': [],
                'properties': dict(status=status_code, message=six.text_type(exc))}
        return json.dumps(body), cls.formats[0], status_code

    @classmethod
    def format_request(cls, request):
        """Return the request unchanged; no request translation is needed."""
        return request
klmitch/nova | nova/objects/compute_node.py | 3 | 23792 | # Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from oslo_utils import versionutils
from sqlalchemy import or_
from sqlalchemy.sql import null
import nova.conf
from nova.db import api as db
from nova.db.sqlalchemy import api as sa_api
from nova.db.sqlalchemy import models
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import pci_device_pool
# Global nova config handle; _from_db_object reads the (initial_)
# *_allocation_ratio options from it during online data migration.
CONF = nova.conf.CONF
@base.NovaObjectRegistry.register
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
    """Versioned object backed by the ``compute_nodes`` database table."""
    # Version 1.0: Initial version
    # Version 1.1: Added get_by_service_id()
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: Added stats field
    # Version 1.4: Added host ip field
    # Version 1.5: Added numa_topology field
    # Version 1.6: Added supported_hv_specs
    # Version 1.7: Added host field
    # Version 1.8: Added get_by_host_and_nodename()
    # Version 1.9: Added pci_device_pools
    # Version 1.10: Added get_first_node_by_host_for_old_compat()
    # Version 1.11: PciDevicePoolList version 1.1
    # Version 1.12: HVSpec version 1.1
    # Version 1.13: Changed service_id field to be nullable
    # Version 1.14: Added cpu_allocation_ratio and ram_allocation_ratio
    # Version 1.15: Added uuid
    # Version 1.16: Added disk_allocation_ratio
    # Version 1.17: Added mapped
    # Version 1.18: Added get_by_uuid().
    # Version 1.19: Added get_by_nodename().
    VERSION = '1.19'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(read_only=True),
        'service_id': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'vcpus': fields.IntegerField(),
        'memory_mb': fields.IntegerField(),
        'local_gb': fields.IntegerField(),
        'vcpus_used': fields.IntegerField(),
        'memory_mb_used': fields.IntegerField(),
        'local_gb_used': fields.IntegerField(),
        'hypervisor_type': fields.StringField(),
        'hypervisor_version': fields.IntegerField(),
        'hypervisor_hostname': fields.StringField(nullable=True),
        'free_ram_mb': fields.IntegerField(nullable=True),
        'free_disk_gb': fields.IntegerField(nullable=True),
        'current_workload': fields.IntegerField(nullable=True),
        'running_vms': fields.IntegerField(nullable=True),
        # TODO(melwitt): cpu_info is non-nullable in the schema but we must
        # wait until version 2.0 of ComputeNode to change it to non-nullable
        'cpu_info': fields.StringField(nullable=True),
        'disk_available_least': fields.IntegerField(nullable=True),
        'metrics': fields.StringField(nullable=True),
        'stats': fields.DictOfNullableStringsField(nullable=True),
        'host_ip': fields.IPAddressField(nullable=True),
        # TODO(rlrossit): because of history, numa_topology is held here as a
        # StringField, not a NUMATopology object. In version 2 of ComputeNode
        # this will be converted over to a fields.ObjectField('NUMATopology')
        'numa_topology': fields.StringField(nullable=True),
        # NOTE(pmurray): the supported_hv_specs field maps to the
        # supported_instances field in the database
        'supported_hv_specs': fields.ListOfObjectsField('HVSpec'),
        # NOTE(pmurray): the pci_device_pools field maps to the
        # pci_stats field in the database
        'pci_device_pools': fields.ObjectField('PciDevicePoolList',
                                               nullable=True),
        'cpu_allocation_ratio': fields.FloatField(),
        'ram_allocation_ratio': fields.FloatField(),
        'disk_allocation_ratio': fields.FloatField(),
        'mapped': fields.IntegerField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade *primitive* for consumers expecting an older version,
        dropping (or back-filling) fields introduced after ``target_version``.
        """
        super(ComputeNode, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 17):
            if 'mapped' in primitive:
                del primitive['mapped']
        if target_version < (1, 16):
            if 'disk_allocation_ratio' in primitive:
                del primitive['disk_allocation_ratio']
        if target_version < (1, 15):
            if 'uuid' in primitive:
                del primitive['uuid']
        if target_version < (1, 14):
            if 'ram_allocation_ratio' in primitive:
                del primitive['ram_allocation_ratio']
            if 'cpu_allocation_ratio' in primitive:
                del primitive['cpu_allocation_ratio']
        if target_version < (1, 13) and primitive.get('service_id') is None:
            # service_id is non-nullable in versions before 1.13
            try:
                service = objects.Service.get_by_compute_host(
                    self._context, primitive['host'])
                primitive['service_id'] = service.id
            except (exception.ComputeHostNotFound, KeyError):
                # NOTE(hanlind): In case anything goes wrong like service not
                # found or host not being set, catch and set a fake value just
                # to allow for older versions that demand a value to work.
                # Setting to -1 will, if value is later used result in a
                # ServiceNotFound, so should be safe.
                primitive['service_id'] = -1
        if target_version < (1, 9) and 'pci_device_pools' in primitive:
            del primitive['pci_device_pools']
        if target_version < (1, 7) and 'host' in primitive:
            del primitive['host']
        if target_version < (1, 6) and 'supported_hv_specs' in primitive:
            del primitive['supported_hv_specs']
        if target_version < (1, 5) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 4) and 'host_ip' in primitive:
            del primitive['host_ip']
        if target_version < (1, 3) and 'stats' in primitive:
            # pre 1.3 version does not have a stats field
            del primitive['stats']

    @staticmethod
    def _host_from_db_object(compute, db_compute):
        """Set ``compute.host`` from the DB row, falling back to the related
        Service record for rows that predate the ``host`` column.
        """
        if (('host' not in db_compute or db_compute['host'] is None) and
                'service_id' in db_compute and
                db_compute['service_id'] is not None):
            # FIXME(sbauza) : Unconverted compute record, provide compatibility
            # This has to stay until we can be sure that any/all compute nodes
            # in the database have been converted to use the host field

            # Service field of ComputeNode could be deprecated in a next patch,
            # so let's use directly the Service object
            try:
                service = objects.Service.get_by_id(
                    compute._context, db_compute['service_id'])
            except exception.ServiceNotFound:
                compute.host = None
                return
            try:
                compute.host = service.host
            except (AttributeError, exception.OrphanedObjectError):
                # Host can be nullable in Service
                compute.host = None
        elif 'host' in db_compute and db_compute['host'] is not None:
            # New-style DB having host as a field
            compute.host = db_compute['host']
        else:
            # We assume it should not happen but in case, let's set it to None
            compute.host = None

    @staticmethod
    def _from_db_object(context, compute, db_compute):
        """Hydrate *compute* from the DB row *db_compute*, applying online
        data migrations (allocation-ratio defaults, legacy NUMA topology
        format, NULL ``mapped``) and persisting any migrated values.
        """
        special_cases = set([
            'stats',
            'supported_hv_specs',
            'host',
            'pci_device_pools',
        ])
        fields = set(compute.fields) - special_cases
        online_updates = {}
        for key in fields:
            value = db_compute[key]
            # NOTE(sbauza): Since all compute nodes don't possibly run the
            # latest RT code updating allocation ratios, we need to provide
            # a backwards compatible way of hydrating them.
            # As we want to care about our operators and since we don't want to
            # ask them to change their configuration files before upgrading, we
            # prefer to hardcode the default values for the ratios here until
            # the next release (Newton) where the opt default values will be
            # restored for both cpu (16.0), ram (1.5) and disk (1.0)
            # allocation ratios.
            # TODO(yikun): Remove this online migration code when all ratio
            # values are NOT 0.0 or NULL
            ratio_keys = ['cpu_allocation_ratio', 'ram_allocation_ratio',
                          'disk_allocation_ratio']
            if key in ratio_keys and value in (None, 0.0):
                # ResourceTracker is not updating the value (old node)
                # or the compute node is updated but the default value has
                # not been changed
                r = getattr(CONF, key)
                # NOTE(yikun): If the allocation ratio record is not set, the
                # allocation ratio will be changed to the
                # CONF.x_allocation_ratio value if x_allocation_ratio is
                # set, and fallback to use the CONF.initial_x_allocation_ratio
                # otherwise.
                init_x_ratio = getattr(CONF, 'initial_%s' % key)
                value = r if r else init_x_ratio
                online_updates[key] = value
            elif key == 'numa_topology' and value and (
                    'nova_object.name' not in value):
                # TODO(stephenfin): Remove this online migration in X or later,
                # once this has bedded in
                value = objects.NUMATopology.from_legacy_object(value)
                online_updates[key] = value
            elif key == 'mapped':
                value = 0 if value is None else value
            setattr(compute, key, value)
        if online_updates:
            # Persist the migrated values so the next load comes back clean.
            db.compute_node_update(context, compute.id, online_updates)
        stats = db_compute['stats']
        if stats:
            compute.stats = jsonutils.loads(stats)
        sup_insts = db_compute.get('supported_instances')
        if sup_insts:
            hv_specs = jsonutils.loads(sup_insts)
            hv_specs = [objects.HVSpec.from_list(hv_spec)
                        for hv_spec in hv_specs]
            compute.supported_hv_specs = hv_specs
        pci_stats = db_compute.get('pci_stats')
        if pci_stats is not None:
            pci_stats = pci_device_pool.from_pci_stats(pci_stats)
        compute.pci_device_pools = pci_stats
        compute._context = context

        # Make sure that we correctly set the host field depending on either
        # host column is present in the table or not
        compute._host_from_db_object(compute, db_compute)

        compute.obj_reset_changes()

        return compute

    @base.remotable_classmethod
    def get_by_id(cls, context, compute_id):
        """Return the compute node with the given database id."""
        db_compute = db.compute_node_get(context, compute_id)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, compute_uuid):
        """Return the compute node with the given uuid.

        :raises: ComputeHostNotFound if no node matches *compute_uuid*.
        """
        nodes = ComputeNodeList.get_all_by_uuids(context, [compute_uuid])
        # We have a unique index on the uuid column so we can get back 0 or 1.
        if not nodes:
            raise exception.ComputeHostNotFound(host=compute_uuid)
        return nodes[0]

    # NOTE(hanlind): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def get_by_service_id(cls, context, service_id):
        """Return the first compute node owned by *service_id* (deprecated)."""
        db_computes = db.compute_nodes_get_by_service_id(context, service_id)
        # NOTE(sbauza): Old version was returning an item, we need to keep this
        # behaviour for backwards compatibility
        db_compute = db_computes[0]
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_host_and_nodename(cls, context, host, nodename):
        """Return the compute node identified by *host* and *nodename*."""
        db_compute = db.compute_node_get_by_host_and_nodename(
            context, host, nodename)
        return cls._from_db_object(context, cls(), db_compute)

    @base.remotable_classmethod
    def get_by_nodename(cls, context, hypervisor_hostname):
        '''Get by node name (i.e. hypervisor hostname).

        Raises ComputeHostNotFound if hypervisor_hostname with the given name
        doesn't exist.
        '''
        db_compute = db.compute_node_get_by_nodename(
            context, hypervisor_hostname)
        return cls._from_db_object(context, cls(), db_compute)

    # TODO(pkholkin): Remove this method in the next major version bump
    @base.remotable_classmethod
    def get_first_node_by_host_for_old_compat(cls, context, host,
                                              use_slave=False):
        """Return an arbitrary (the first) node for *host*; legacy helper."""
        computes = ComputeNodeList.get_all_by_host(context, host, use_slave)
        # FIXME(sbauza): Ironic deployments can return multiple
        # nodes per host, we should return all the nodes and modify the callers
        # instead.
        # Arbitrarily returning the first node.
        return computes[0]

    @staticmethod
    def _convert_stats_to_db_format(updates):
        # Serialize the stats dict into the JSON text column.
        stats = updates.pop('stats', None)
        if stats is not None:
            updates['stats'] = jsonutils.dumps(stats)

    @staticmethod
    def _convert_host_ip_to_db_format(updates):
        # IPAddress objects are stored as plain strings.
        host_ip = updates.pop('host_ip', None)
        if host_ip:
            updates['host_ip'] = str(host_ip)

    @staticmethod
    def _convert_supported_instances_to_db_format(updates):
        # supported_hv_specs maps to the JSON 'supported_instances' column.
        hv_specs = updates.pop('supported_hv_specs', None)
        if hv_specs is not None:
            hv_specs = [hv_spec.to_list() for hv_spec in hv_specs]
            updates['supported_instances'] = jsonutils.dumps(hv_specs)

    @staticmethod
    def _convert_pci_stats_to_db_format(updates):
        # pci_device_pools maps to the JSON 'pci_stats' column.
        if 'pci_device_pools' in updates:
            pools = updates.pop('pci_device_pools')
            if pools is not None:
                pools = jsonutils.dumps(pools.obj_to_primitive())
            updates['pci_stats'] = pools

    @base.remotable
    def create(self):
        """Insert this compute node into the database, generating a uuid
        when one was not provided.

        :raises: ObjectActionError if the object was already created.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']

        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_create(self._context, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def save(self, prune_stats=False):
        """Persist changed fields to the database."""
        # NOTE(belliott) ignore prune_stats param, no longer relevant
        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._convert_stats_to_db_format(updates)
        self._convert_host_ip_to_db_format(updates)
        self._convert_supported_instances_to_db_format(updates)
        self._convert_pci_stats_to_db_format(updates)

        db_compute = db.compute_node_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_compute)

    @base.remotable
    def destroy(self):
        """Delete this compute node's DB record."""
        db.compute_node_delete(self._context, self.id)

    def update_from_virt_driver(self, resources):
        """Copy the resource values reported by the virt driver onto this
        object. ``uuid`` is read-only and only honored on first create.
        """
        # NOTE(pmurray): the virt driver provides a dict of values that
        # can be copied into the compute node. The names and representation
        # do not exactly match.
        # TODO(pmurray): the resources dict should be formalized.
        keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
                "vcpus_used", "memory_mb_used", "local_gb_used",
                "numa_topology", "hypervisor_type",
                "hypervisor_version", "hypervisor_hostname",
                "disk_available_least", "host_ip", "uuid"]
        for key in keys:
            if key in resources:
                # The uuid field is read-only so it should only be set when
                # creating the compute node record for the first time. Ignore
                # it otherwise.
                if key == 'uuid' and 'uuid' in self:
                    continue
                setattr(self, key, resources[key])

        # supported_instances has a different name in compute_node
        if 'supported_instances' in resources:
            si = resources['supported_instances']
            self.supported_hv_specs = [objects.HVSpec.from_list(s) for s in si]
@base.NovaObjectRegistry.register
class ComputeNodeList(base.ObjectListBase, base.NovaObject):
    """List object wrapping ComputeNode, with the bulk DB query helpers."""
    # Version 1.0: Initial version
    #              ComputeNode <= version 1.2
    # Version 1.1 ComputeNode version 1.3
    # Version 1.2 Add get_by_service()
    # Version 1.3 ComputeNode version 1.4
    # Version 1.4 ComputeNode version 1.5
    # Version 1.5 Add use_slave to get_by_service
    # Version 1.6 ComputeNode version 1.6
    # Version 1.7 ComputeNode version 1.7
    # Version 1.8 ComputeNode version 1.8 + add get_all_by_host()
    # Version 1.9 ComputeNode version 1.9
    # Version 1.10 ComputeNode version 1.10
    # Version 1.11 ComputeNode version 1.11
    # Version 1.12 ComputeNode version 1.12
    # Version 1.13 ComputeNode version 1.13
    # Version 1.14 ComputeNode version 1.14
    # Version 1.15 Added get_by_pagination()
    # Version 1.16: Added get_all_by_uuids()
    # Version 1.17: Added get_all_by_not_mapped()
    VERSION = '1.17'
    fields = {
        'objects': fields.ListOfObjectsField('ComputeNode'),
    }

    @base.remotable_classmethod
    def get_all(cls, context):
        """Return every compute node in the database."""
        db_computes = db.compute_node_get_all(context)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @base.remotable_classmethod
    def get_all_by_not_mapped(cls, context, mapped_less_than):
        """Return ComputeNode records that are not mapped at a certain level"""
        db_computes = db.compute_node_get_all_mapped_less_than(
            context, mapped_less_than)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @base.remotable_classmethod
    def get_by_pagination(cls, context, limit=None, marker=None):
        """Return a page of compute nodes bounded by *limit* and *marker*."""
        db_computes = db.compute_node_get_all_by_pagination(
            context, limit=limit, marker=marker)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @base.remotable_classmethod
    def get_by_hypervisor(cls, context, hypervisor_match):
        """Search compute nodes by hypervisor; matching semantics are
        delegated to db.compute_node_search_by_hypervisor.
        """
        db_computes = db.compute_node_search_by_hypervisor(context,
                                                           hypervisor_match)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    # NOTE(hanlind): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def _get_by_service(cls, context, service_id, use_slave=False):
        """Return the nodes owned by *service_id* (deprecated); [] when the
        service exists with no computes or is not found.
        """
        try:
            db_computes = db.compute_nodes_get_by_service_id(
                context, service_id)
        except exception.ServiceNotFound:
            # NOTE(sbauza): Previous behaviour was returning an empty list
            # if the service was created with no computes, we need to keep it.
            db_computes = []
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @staticmethod
    @db.select_db_reader_mode
    def _db_compute_node_get_all_by_host(context, host, use_slave=False):
        # NOTE(review): use_slave appears to be consumed by the
        # select_db_reader_mode decorator, not the function body — confirm.
        return db.compute_node_get_all_by_host(context, host)

    @base.remotable_classmethod
    def get_all_by_host(cls, context, host, use_slave=False):
        """Return every compute node belonging to *host*."""
        db_computes = cls._db_compute_node_get_all_by_host(context, host,
                                                           use_slave=use_slave)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @staticmethod
    @db.select_db_reader_mode
    def _db_compute_node_get_all_by_uuids(context, compute_uuids):
        # Direct SQLAlchemy query filtering on the uuid column.
        db_computes = sa_api.model_query(context, models.ComputeNode).filter(
            models.ComputeNode.uuid.in_(compute_uuids)).all()
        return db_computes

    @base.remotable_classmethod
    def get_all_by_uuids(cls, context, compute_uuids):
        """Return the compute nodes whose uuid is in *compute_uuids*."""
        db_computes = cls._db_compute_node_get_all_by_uuids(context,
                                                            compute_uuids)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)

    @staticmethod
    @db.select_db_reader_mode
    def _db_compute_node_get_by_hv_type(context, hv_type):
        # Direct SQLAlchemy query filtering on the hypervisor_type column.
        db_computes = context.session.query(models.ComputeNode).filter(
            models.ComputeNode.hypervisor_type == hv_type).all()
        return db_computes

    @classmethod
    def get_by_hypervisor_type(cls, context, hv_type):
        """Return the compute nodes with the given hypervisor_type."""
        db_computes = cls._db_compute_node_get_by_hv_type(context, hv_type)
        return base.obj_make_list(context, cls(context), objects.ComputeNode,
                                  db_computes)
def _get_node_empty_ratio(context, max_count):
"""Query the DB for non-deleted compute_nodes with 0.0/None alloc ratios
Results are limited by ``max_count``.
"""
return context.session.query(models.ComputeNode).filter(or_(
models.ComputeNode.ram_allocation_ratio == '0.0',
models.ComputeNode.cpu_allocation_ratio == '0.0',
models.ComputeNode.disk_allocation_ratio == '0.0',
models.ComputeNode.ram_allocation_ratio == null(),
models.ComputeNode.cpu_allocation_ratio == null(),
models.ComputeNode.disk_allocation_ratio == null()
)).filter(models.ComputeNode.deleted == 0).limit(max_count).all()
@sa_api.pick_context_manager_writer
def migrate_empty_ratio(context, max_count):
cns = _get_node_empty_ratio(context, max_count)
# NOTE(yikun): If it's an existing record with 0.0 or None values,
# we need to migrate this record using 'xxx_allocation_ratio' config
# if it's set, and fallback to use the 'initial_xxx_allocation_ratio'
# otherwise.
for cn in cns:
for t in ['cpu', 'disk', 'ram']:
current_ratio = getattr(cn, '%s_allocation_ratio' % t)
if current_ratio in (0.0, None):
r = getattr(CONF, "%s_allocation_ratio" % t)
init_x_ratio = getattr(CONF, "initial_%s_allocation_ratio" % t)
conf_alloc_ratio = r if r else init_x_ratio
setattr(cn, '%s_allocation_ratio' % t, conf_alloc_ratio)
context.session.add(cn)
found = done = len(cns)
return found, done
| apache-2.0 | -351,150,096,048,235,700 | 43.806026 | 79 | 0.616846 | false | 3.975936 | false | false | false | 0.000042 |
andymckenzie/bestbooks | get_goodreads_data.py | 1 | 2774 | #you have to replace the key sections with your own access keys, of course
#find a list of around 1000 book titles to search, get the isbndb data for each of them, then use that data to query goodreads
#then save into a .csv file there levant info including the rating and the number of ratings for each book, in a row
#see https://www.goodreads.com/topic/show/776821-get-review-statistics-given-a-list-of-isbns for more advice
import urllib2
import xml.etree.ElementTree as ET
import time
import json
import requests
from pprint import pprint
import csv
#goodreadsAPIinfo
key=
#secret=
#isbndbapiinfo
key=
#data to get from isbndb = isbn, author, year published
with open("1001_novels.txt","rb") as f:
list_of_novel_data=f.readlines()
list_of_books=[]
for line in list_of_novel_data:
words=line.split("by")
list_of_books.append(words[0])
book_data_LoL=[ []for book in list_of_novel_data]
#might want to also try to get author or publisher data from the book
isbndb_data = [ [] for book in list_of_novel_data]
isbn13_string="9780590353427"
i=0
for book in list_of_books:
book_title=book.rstrip().replace(" ","+").replace(",","").replace("?","").replace("'","")
print book_title
response=urllib2.urlopen(str("http://www.isbndb.com/api/books.xml?access_key=&index1=title&value1="+book_title))
xml = response.read()
root = ET.fromstring(xml)
if i<1001:
for child in root:
j = 0
for child2 in child:
isbn = child2.attrib['isbn13']
isbn13 = "," + str(isbn)
if j == 0:
isbn13_string += isbn13
isbndb_data[i].append(book.rstrip().replace(",","").replace("?","").replace("'",""))
isbndb_data[i].append(isbn13)
else:
break
j += 1
else:
break
time.sleep(2)
i+=1
print isbndb_data
print isbn13_string
#http://www.goodreads.com/book/review_counts.json?isbns=9780590353427&9780582210202&9788426572363&key=
u=("http://www.goodreads.com/book/review_counts.json?isbns=" + isbn13_string + "&key=")
#Take the goodreads json, spit out back the isbn13, the ratings count, and the rating
data = json.loads(requests.get(u).text)
print type(data['books'])
print data['books']
i = 0
for book in data['books']:
print type(book_data_LoL[i])
book_data_LoL[i].append(book['isbn13'])
book_data_LoL[i].append(book['work_ratings_count'])
book_data_LoL[i].append(book['average_rating'])
i += 1
with open("output_title_isbn1.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(isbndb_data)
with open("output_goodreads_isbn1.csv", "wb") as f:
writer = csv.writer(f)
writer.writerows(book_data_LoL)
| mit | 2,294,552,616,201,172,000 | 30.168539 | 126 | 0.650324 | false | 3.051705 | false | false | false | 0.020548 |
wwright2/dcim3-angstrom1 | sources/bitbake/lib/bb/daemonize.py | 7 | 8070 | """
Python Daemonizing helper
Configurable daemon behaviors:
1.) The current working directory set to the "/" directory.
2.) The current file creation mode mask set to 0.
3.) Close all open files (1024).
4.) Redirect standard I/O streams to "/dev/null".
A failed call to fork() now raises an exception.
References:
1) Advanced Programming in the Unix Environment: W. Richard Stevens
http://www.apuebook.com/apue3e.html
2) The Linux Programming Interface: Michael Kerrisk
http://man7.org/tlpi/index.html
3) Unix Programming Frequently Asked Questions:
http://www.faqs.org/faqs/unix-faq/programmer/faq/
Modified to allow a function to be daemonized and return for
bitbake use by Richard Purdie
"""
__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
__version__ = "0.2"
# Standard Python modules.
import os # Miscellaneous OS interfaces.
import sys # System-specific parameters and functions.
# Default daemon parameters.
# File mode creation mask of the daemon.
# For BitBake's children, we do want to inherit the parent umask.
UMASK = None
# Default maximum for the number of available file descriptors.
MAXFD = 1024
# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"
def createDaemon(function, logfile):
"""
Detach a process from the controlling terminal and run it in the
background as a daemon, returning control to the caller.
"""
try:
# Fork a child process so the parent can exit. This returns control to
# the command-line or shell. It also guarantees that the child will not
# be a process group leader, since the child receives a new process ID
# and inherits the parent's process group ID. This step is required
# to insure that the next call to os.setsid is successful.
pid = os.fork()
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
if (pid == 0): # The first child.
# To become the session leader of this new session and the process group
# leader of the new process group, we call os.setsid(). The process is
# also guaranteed not to have a controlling terminal.
os.setsid()
# Is ignoring SIGHUP necessary?
#
# It's often suggested that the SIGHUP signal should be ignored before
# the second fork to avoid premature termination of the process. The
# reason is that when the first child terminates, all processes, e.g.
# the second child, in the orphaned group will be sent a SIGHUP.
#
# "However, as part of the session management system, there are exactly
# two cases where SIGHUP is sent on the death of a process:
#
# 1) When the process that dies is the session leader of a session that
# is attached to a terminal device, SIGHUP is sent to all processes
# in the foreground process group of that terminal device.
# 2) When the death of a process causes a process group to become
# orphaned, and one or more processes in the orphaned group are
# stopped, then SIGHUP and SIGCONT are sent to all members of the
# orphaned group." [2]
#
# The first case can be ignored since the child is guaranteed not to have
# a controlling terminal. The second case isn't so easy to dismiss.
# The process group is orphaned when the first child terminates and
# POSIX.1 requires that every STOPPED process in an orphaned process
# group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
# second child is not STOPPED though, we can safely forego ignoring the
# SIGHUP signal. In any case, there are no ill-effects if it is ignored.
#
# import signal # Set handlers for asynchronous events.
# signal.signal(signal.SIGHUP, signal.SIG_IGN)
try:
# Fork a second child and exit immediately to prevent zombies. This
# causes the second child process to be orphaned, making the init
# process responsible for its cleanup. And, since the first child is
# a session leader without a controlling terminal, it's possible for
# it to acquire one by opening a terminal in the future (System V-
# based systems). This second fork guarantees that the child is no
# longer a session leader, preventing the daemon from ever acquiring
# a controlling terminal.
pid = os.fork() # Fork a second child.
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
if (pid == 0): # The second child.
# We probably don't want the file mode creation mask inherited from
# the parent, so we give the child complete control over permissions.
if UMASK is not None:
os.umask(UMASK)
else:
# Parent (the first child) of the second child.
os._exit(0)
else:
# exit() or _exit()?
# _exit is like exit(), but it doesn't call any functions registered
# with atexit (and on_exit) or any registered signal handlers. It also
# closes any open file descriptors. Using exit() may cause all stdio
# streams to be flushed twice and any temporary files may be unexpectedly
# removed. It's therefore recommended that child branches of a fork()
# and the parent branch(es) of a daemon use _exit().
return
# Close all open file descriptors. This prevents the child from keeping
# open any file descriptors inherited from the parent. There is a variety
# of methods to accomplish this task. Three are listed below.
#
# Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
# number of open file descriptors to close. If it doesn't exist, use
# the default value (configurable).
#
# try:
# maxfd = os.sysconf("SC_OPEN_MAX")
# except (AttributeError, ValueError):
# maxfd = MAXFD
#
# OR
#
# if (os.sysconf_names.has_key("SC_OPEN_MAX")):
# maxfd = os.sysconf("SC_OPEN_MAX")
# else:
# maxfd = MAXFD
#
# OR
#
# Use the getrlimit method to retrieve the maximum file descriptor number
# that can be opened by this process. If there is no limit on the
# resource, use the default value.
#
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
# Iterate through and close all file descriptors.
# for fd in range(0, maxfd):
# try:
# os.close(fd)
# except OSError: # ERROR, fd wasn't open to begin with (ignored)
# pass
# Redirect the standard I/O file descriptors to the specified file. Since
# the daemon has no controlling terminal, most daemons redirect stdin,
# stdout, and stderr to /dev/null. This is done to prevent side-effects
# from reads and writes to the standard I/O file descriptors.
# This call to open is guaranteed to return the lowest file descriptor,
# which will be 0 (stdin), since it was closed above.
# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
# Duplicate standard input to standard output and standard error.
# os.dup2(0, 1) # standard output (1)
# os.dup2(0, 2) # standard error (2)
si = file('/dev/null', 'r')
so = file(logfile, 'w')
se = so
# Replace those fds with our own
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
function()
os._exit(0)
| mit | -7,296,642,820,362,077,000 | 40.813472 | 82 | 0.641512 | false | 4.049172 | false | false | false | 0.003594 |
kazitanvirahsan/scrapy | scrapy/utils/response.py | 55 | 3129 | """
This module provides some useful functions for working with
scrapy.http.Response objects
"""
import os
import re
import weakref
import webbrowser
import tempfile
from twisted.web import http
from scrapy.utils.python import to_bytes, to_native_str
from w3lib import html
from scrapy.utils.decorators import deprecated
@deprecated
def body_or_str(*a, **kw):
from scrapy.utils.iterators import _body_or_str
return _body_or_str(*a, **kw)
_baseurl_cache = weakref.WeakKeyDictionary()
def get_base_url(response):
"""Return the base url of the given response, joined with the response url"""
if response not in _baseurl_cache:
text = response.body_as_unicode()[0:4096]
_baseurl_cache[response] = html.get_base_url(text, response.url,
response.encoding)
return _baseurl_cache[response]
_noscript_re = re.compile(u'<noscript>.*?</noscript>', re.IGNORECASE | re.DOTALL)
_script_re = re.compile(u'<script.*?>.*?</script>', re.IGNORECASE | re.DOTALL)
_metaref_cache = weakref.WeakKeyDictionary()
def get_meta_refresh(response):
"""Parse the http-equiv refrsh parameter from the given response"""
if response not in _metaref_cache:
text = response.body_as_unicode()[0:4096]
text = _noscript_re.sub(u'', text)
text = _script_re.sub(u'', text)
_metaref_cache[response] = html.get_meta_refresh(text, response.url,
response.encoding)
return _metaref_cache[response]
def response_status_message(status):
"""Return status code plus status text descriptive message
>>> response_status_message(200)
'200 OK'
>>> response_status_message(404)
'404 Not Found'
"""
return '%s %s' % (status, to_native_str(http.RESPONSES.get(int(status))))
def response_httprepr(response):
"""Return raw HTTP representation (as bytes) of the given response. This
is provided only for reference, since it's not the exact stream of bytes
that was received (that's not exposed by Twisted).
"""
s = b"HTTP/1.1 " + to_bytes(str(response.status)) + b" " + \
to_bytes(http.RESPONSES.get(response.status, b'')) + b"\r\n"
if response.headers:
s += response.headers.to_string() + b"\r\n"
s += b"\r\n"
s += response.body
return s
def open_in_browser(response, _openfunc=webbrowser.open):
"""Open the given response in a local web browser, populating the <base>
tag for external links to work
"""
from scrapy.http import HtmlResponse, TextResponse
# XXX: this implementation is a bit dirty and could be improved
body = response.body
if isinstance(response, HtmlResponse):
if b'<base' not in body:
repl = '<head><base href="%s">' % response.url
body = body.replace(b'<head>', to_bytes(repl))
ext = '.html'
elif isinstance(response, TextResponse):
ext = '.txt'
else:
raise TypeError("Unsupported response type: %s" %
response.__class__.__name__)
fd, fname = tempfile.mkstemp(ext)
os.write(fd, body)
os.close(fd)
return _openfunc("file://%s" % fname)
| bsd-3-clause | -4,311,945,944,606,696,400 | 32.287234 | 81 | 0.657079 | false | 3.617341 | false | false | false | 0.001918 |
tony-rasskazov/meteo | weewx/bin/weewx/almanac.py | 5 | 19940 | #
# Copyright (c) 2009-2015 Tom Keffer <tkeffer@gmail.com>
#
# See the file LICENSE.txt for your full rights.
#
"""Almanac data
This module can optionally use PyEphem, which offers high quality
astronomical calculations. See http://rhodesmill.org/pyephem. """
import time
import sys
import math
import copy
import weeutil.Moon
import weewx.units
# If the user has installed ephem, use it. Otherwise, fall back to the weeutil algorithms:
try:
import ephem
except ImportError:
import weeutil.Sun
# NB: Have Almanac inherit from 'object'. However, this will cause
# an 'autocall' bug in Cheetah versions before 2.1.
class Almanac(object):
"""Almanac data.
ATTRIBUTES.
As a minimum, the following attributes are available:
sunrise: Time (local) upper limb of the sun rises above the horizon, formatted using the format 'timeformat'.
sunset: Time (local) upper limb of the sun sinks below the horizon, formatted using the format 'timeformat'.
moon_phase: A description of the moon phase(eg. "new moon", Waxing crescent", etc.)
moon_fullness: Percent fullness of the moon (0=new moon, 100=full moon)
If the module 'ephem' is used, them many other attributes are available.
Here are a few examples:
sun.rise: Time upper limb of sun will rise above the horizon today in unix epoch time
sun.transit: Time of transit today (sun over meridian) in unix epoch time
sun.previous_sunrise: Time of last sunrise in unix epoch time
sun.az: Azimuth (in degrees) of sun
sun.alt: Altitude (in degrees) of sun
mars.rise: Time when upper limb of mars will rise above horizon today in unix epoch time
mars.ra: Right ascension of mars
etc.
EXAMPLES:
These examples are designed to work in the Pacific timezone
>>> import os
>>> os.environ['TZ'] = 'America/Los_Angeles'
>>> t = 1238180400
>>> print timestamp_to_string(t)
2009-03-27 12:00:00 PDT (1238180400)
Test conversions to Dublin Julian Days
>>> t_djd = timestamp_to_djd(t)
>>> print "%.5f" % t_djd
39898.29167
Test the conversion back
>>> print "%.0f" % djd_to_timestamp(t_djd)
1238180400
>>> almanac = Almanac(t, 46.0, -122.0)
Test backwards compatibility with attribute 'moon_fullness':
>>> print "Fullness of the moon (rounded) is %.2f%% [%s]" % (almanac.moon_fullness, almanac.moon_phase)
Fullness of the moon (rounded) is 2.00% [new (totally dark)]
Now get a more precise result for fullness of the moon:
>>> print "Fullness of the moon (more precise) is %.2f%%" % almanac.moon.moon_fullness
Fullness of the moon (more precise) is 1.70%
Test backwards compatibility with attributes 'sunrise' and 'sunset'
>>> print "Sunrise, sunset:", almanac.sunrise, almanac.sunset
Sunrise, sunset: 06:56 19:30
Get sunrise, sun transit, and sunset using the new 'ephem' syntax:
>>> print "Sunrise, sun transit, sunset:", almanac.sun.rise, almanac.sun.transit, almanac.sun.set
Sunrise, sun transit, sunset: 06:56 13:13 19:30
Do the same with the moon:
>>> print "Moon rise, transit, set:", almanac.moon.rise, almanac.moon.transit, almanac.moon.set
Moon rise, transit, set: 06:59 14:01 21:20
And Mars
>>> print "Mars rise, transit, set:", almanac.mars.rise, almanac.mars.transit, almanac.moon.set
Mars rise, transit, set: 06:08 11:34 21:20
Finally, try a star
>>> print "Rigel rise, transit, set:", almanac.rigel.rise, almanac.rigel.transit, almanac.rigel.set
Rigel rise, transit, set: 12:32 18:00 23:28
Exercise equinox, solstice routines
>>> print almanac.next_vernal_equinox
20-Mar-2010 10:32
>>> print almanac.next_autumnal_equinox
22-Sep-2009 14:18
>>> print almanac.next_summer_solstice
20-Jun-2009 22:45
>>> print almanac.previous_winter_solstice
21-Dec-2008 04:03
>>> print almanac.next_winter_solstice
21-Dec-2009 09:46
Exercise moon state routines
>>> print almanac.next_full_moon
09-Apr-2009 07:55
>>> print almanac.next_new_moon
24-Apr-2009 20:22
>>> print almanac.next_first_quarter_moon
02-Apr-2009 07:33
Now location of the sun and moon
>>> print "Solar azimuth, altitude = (%.2f, %.2f)" % (almanac.sun.az, almanac.sun.alt)
Solar azimuth, altitude = (154.14, 44.02)
>>> print "Moon azimuth, altitude = (%.2f, %.2f)" % (almanac.moon.az, almanac.moon.alt)
Moon azimuth, altitude = (133.55, 47.89)
Try a time and location where the sun is always up
>>> t = 1371044003
>>> print timestamp_to_string(t)
2013-06-12 06:33:23 PDT (1371044003)
>>> almanac = Almanac(t, 64.0, 0.0)
>>> print almanac(horizon=-6).sun(use_center=1).rise
N/A
Try the pyephem "Naval Observatory" example.
>>> t = 1252256400
>>> print timestamp_to_gmtime(t)
2009-09-06 17:00:00 UTC (1252256400)
>>> atlanta = Almanac(t, 33.8, -84.4, pressure=0, horizon=-34.0/60.0)
>>> # Print it in GMT, so it can easily be compared to the example:
>>> print timestamp_to_gmtime(atlanta.sun.previous_rising.raw)
2009-09-06 11:14:56 UTC (1252235696)
>>> print timestamp_to_gmtime(atlanta.moon.next_setting.raw)
2009-09-07 14:05:29 UTC (1252332329)
Now try the civil twilight examples:
>>> print timestamp_to_gmtime(atlanta(horizon=-6).sun(use_center=1).previous_rising.raw)
2009-09-06 10:49:40 UTC (1252234180)
>>> print timestamp_to_gmtime(atlanta(horizon=-6).sun(use_center=1).next_setting.raw)
2009-09-07 00:21:22 UTC (1252282882)
Try sun rise again, to make sure the horizon value cleared:
>>> print timestamp_to_gmtime(atlanta.sun.previous_rising.raw)
2009-09-06 11:14:56 UTC (1252235696)
Try an attribute that does not explicitly appear in the class Almanac
>>> print "%.3f" % almanac.mars.sun_distance
1.494
Try a specialized attribute for Jupiter
>>> print almanac.jupiter.cmlI
191:16:58.0
Should fail if applied to a different body
>>> print almanac.venus.cmlI
Traceback (most recent call last):
...
AttributeError: 'Venus' object has no attribute 'cmlI'
Try a nonsense body:
>>> x = almanac.bar.rise
Traceback (most recent call last):
...
KeyError: 'Bar'
Try a nonsense tag:
>>> x = almanac.sun.foo
Traceback (most recent call last):
...
AttributeError: 'Sun' object has no attribute 'foo'
"""
def __init__(self, time_ts, lat, lon,
altitude=None,
temperature=None,
pressure=None,
horizon=None,
moon_phases=weeutil.Moon.moon_phases,
formatter=weewx.units.Formatter()):
"""Initialize an instance of Almanac
time_ts: A unix epoch timestamp with the time of the almanac. If None, the
present time will be used.
lat, lon: Observer's location in degrees.
altitude: Observer's elevation in **meters**. [Optional. Default is 0 (sea level)]
temperature: Observer's temperature in **degrees Celsius**. [Optional. Default is 15.0]
pressure: Observer's atmospheric pressure in **mBars**. [Optional. Default is 1010]
horizon: Angle of the horizon in degrees [Optional. Default is zero]
moon_phases: An array of 8 strings with descriptions of the moon
phase. [optional. If not given, then weeutil.Moon.moon_phases will be used]
formatter: An instance of weewx.units.Formatter() with the formatting information
to be used.
"""
self.time_ts = time_ts if time_ts else time.time()
self.time_djd = timestamp_to_djd(self.time_ts)
self.lat = lat
self.lon = lon
self.altitude = altitude if altitude is not None else 0.0
self.temperature = temperature if temperature is not None else 15.0
self.pressure = pressure if pressure is not None else 1010.0
self.horizon = horizon if horizon is not None else 0.0
self.moon_phases = moon_phases
self.formatter = formatter
(y,m,d) = time.localtime(self.time_ts)[0:3]
(self.moon_index, self._moon_fullness) = weeutil.Moon.moon_phase(y, m, d)
self.moon_phase = self.moon_phases[self.moon_index]
# Check to see whether the user has module 'ephem'.
if 'ephem' in sys.modules:
self.hasExtras = True
else:
# No ephem package. Use the weeutil algorithms, which supply a minimum of functionality
(sunrise_utc_h, sunset_utc_h) = weeutil.Sun.sunRiseSet(y, m, d, self.lon, self.lat)
sunrise_ts = weeutil.weeutil.utc_to_ts(y, m, d, sunrise_utc_h)
sunset_ts = weeutil.weeutil.utc_to_ts(y, m, d, sunset_utc_h)
self._sunrise = weewx.units.ValueHelper((sunrise_ts, "unix_epoch", "group_time"),
context="ephem_day", formatter=self.formatter)
self._sunset = weewx.units.ValueHelper((sunset_ts, "unix_epoch", "group_time"),
context="ephem_day", formatter=self.formatter)
self.hasExtras = False
# Shortcuts, used for backwards compatibility
@property
def sunrise(self):
return self.sun.rise if self.hasExtras else self._sunrise
@property
def sunset(self):
return self.sun.set if self.hasExtras else self._sunset
@property
def moon_fullness(self):
return int(self.moon.moon_fullness+0.5) if self.hasExtras else self._moon_fullness
def __call__(self, **kwargs):
"""Call an almanac object as a functor. This allows overriding the values
used when the Almanac instance was initialized.
Named arguments:
almanac_time: The observer's time in unix epoch time.
lat: The observer's latitude in degrees
lon: The observer's longitude in degrees
altitude: The observer's altitude in meters
horizon: The horizon angle in degrees
temperature: The observer's temperature (used to calculate refraction)
pressure: The observer's pressure (used to calculate refraction)
"""
# Make a copy of myself.
almanac = copy.copy(self)
# Now set a new value for any named arguments.
for key in kwargs:
if 'almanac_time' in kwargs:
almanac.time_ts = kwargs['almanac_time']
almanac.time_djd = timestamp_to_djd(self.time_ts)
else:
setattr(almanac, key, kwargs[key])
return almanac
def __getattr__(self, attr):
# This is to get around bugs in the Python version of Cheetah's namemapper:
if attr.startswith('__') or attr == 'has_key':
raise AttributeError(attr)
if not self.hasExtras:
# If the Almanac does not have extended capabilities, we can't
# do any of the following. Raise an exception.
raise AttributeError("Unknown attribute %s" % attr)
# We do have extended capability. Check to see if the attribute is a calendar event:
elif attr in ['previous_equinox', 'next_equinox',
'previous_solstice', 'next_solstice',
'previous_autumnal_equinox', 'next_autumnal_equinox',
'previous_vernal_equinox', 'next_vernal_equinox',
'previous_winter_solstice', 'next_winter_solstice',
'previous_summer_solstice', 'next_summer_solstice',
'previous_new_moon', 'next_new_moon',
'previous_first_quarter_moon', 'next_first_quarter_moon',
'previous_full_moon', 'next_full_moon',
'previous_last_quarter_moon', 'next_last_quarter_moon']:
# This is how you call a function on an instance when all you have
# is the function's name as a string
djd = getattr(ephem, attr)(self.time_djd)
return weewx.units.ValueHelper((djd, "dublin_jd", "group_time"),
context="ephem_year", formatter=self.formatter)
else:
# It's not a calendar event. The attribute must be a heavenly body
# (such as 'sun', or 'jupiter'). Bind the almanac and the heavenly body
# together and return as an AlmanacBinder
return AlmanacBinder(self, attr)
fn_map = {'rise' : 'next_rising',
'set' : 'next_setting',
'transit' : 'next_transit'}
class AlmanacBinder(object):
"""This class binds the observer properties held in Almanac, with the heavenly
body to be observed."""
def __init__(self, almanac, heavenly_body):
# Transfer all values over
self.time_ts = almanac.time_ts
self.time_djd = almanac.time_djd
self.lat = almanac.lat
self.lon = almanac.lon
self.altitude = almanac.altitude
self.temperature = almanac.temperature
self.pressure = almanac.pressure
self.horizon = almanac.horizon
self.moon_phases = almanac.moon_phases
self.moon_phase = almanac.moon_phase
self.formatter = almanac.formatter
# Calculate and store the start-of-day in Dublin Julian Days.
# self.sod_djd = timestamp_to_djd(weeutil.weeutil.startOfDay(self.time_ts))
(y,m,d) = time.localtime(self.time_ts)[0:3]
self.sod_djd = timestamp_to_djd(time.mktime((y,m,d,0,0,0,0,0,-1)))
self.heavenly_body= heavenly_body
self.use_center = False
def __call__(self, use_center=False):
self.use_center = use_center
return self
def __getattr__(self, attr):
"""Get the requested observation, such as when the body will rise."""
if attr.startswith('__'):
raise AttributeError(attr)
# Many of these functions have the unfortunate side effect of changing the state of the body
# being examined. So, create a temporary body and then throw it away
ephem_body = _get_ephem_body(self.heavenly_body)
if attr in ['rise', 'set', 'transit']:
# These verbs refer to the time the event occurs anytime in the day, which
# is not necessarily the *next* sunrise.
attr = fn_map[attr]
# These functions require the time at the start of day
observer = self._get_observer(self.sod_djd)
# Call the function. Be prepared to catch an exception if the body is always up.
try:
if attr in ['next_rising', 'next_setting']:
time_djd = getattr(observer, attr)(ephem_body, use_center=self.use_center)
else:
time_djd = getattr(observer, attr)(ephem_body)
except (ephem.AlwaysUpError, ephem.NeverUpError):
time_djd = None
return weewx.units.ValueHelper((time_djd, "dublin_jd", "group_time"), context="ephem_day", formatter=self.formatter)
elif attr in ['next_rising', 'next_setting', 'next_transit', 'next_antitransit',
'previous_rising', 'previous_setting', 'previous_transit', 'previous_antitransit']:
# These functions require the time of the observation
observer = self._get_observer(self.time_djd)
# Call the function. Be prepared to catch an exception if the body is always up.
try:
if attr in ['next_rising', 'next_setting', 'previous_rising', 'previous_setting']:
time_djd = getattr(observer, attr)(ephem_body, use_center=self.use_center)
else:
time_djd = getattr(observer, attr)(ephem_body)
except (ephem.AlwaysUpError, ephem.NeverUpError):
time_djd = None
return weewx.units.ValueHelper((time_djd, "dublin_jd", "group_time"), context="ephem_day", formatter=self.formatter)
else:
# These functions need the current time in Dublin Julian Days
observer = self._get_observer(self.time_djd)
ephem_body.compute(observer)
if attr in ['az', 'alt', 'a_ra', 'a_dec', 'g_ra', 'ra', 'g_dec', 'dec',
'elong', 'radius', 'hlong', 'hlat', 'sublat', 'sublong']:
# Return the results in degrees rather than radians
return math.degrees(getattr(ephem_body, attr))
elif attr=='moon_fullness':
# The attribute "moon_fullness" is the percentage of the moon surface that is illuminated.
# Unfortunately, phephem calls it "moon_phase", so call ephem with that name.
# Return the result in percent.
return 100.0 * ephem_body.moon_phase
else:
# Just return the result unchanged. This will raise an AttributeError exception
# if the attribute does not exist.
return getattr(ephem_body, attr)
def _get_observer(self, time_ts):
# Build an ephem Observer object
observer = ephem.Observer()
observer.lat = math.radians(self.lat)
observer.long = math.radians(self.lon)
observer.elevation = self.altitude
observer.horizon = math.radians(self.horizon)
observer.temp = self.temperature
observer.pressure = self.pressure
observer.date = time_ts
return observer
def _get_ephem_body(heavenly_body):
# The library 'ephem' refers to heavenly bodies using a capitalized
# name. For example, the module used for 'mars' is 'ephem.Mars'.
cap_name = heavenly_body.capitalize()
# If the heavenly body is a star, or if the body does not exist, then an
# exception will be raised. Be prepared to catch it.
try:
ephem_body = getattr(ephem, cap_name)()
except AttributeError:
# That didn't work. Try a star. If this doesn't work either,
# then a KeyError exception will be raised.
ephem_body = ephem.star(cap_name)
return ephem_body
def timestamp_to_djd(time_ts):
"""Convert from a unix time stamp to the number of days since 12/31/1899 12:00 UTC
(aka "Dublin Julian Days")"""
# The number 25567.5 is the start of the Unix epoch (1/1/1970). Just add on the
# number of days since then
return 25567.5 + time_ts/86400.0
def djd_to_timestamp(djd):
"""Convert from number of days since 12/31/1899 12:00 UTC ("Dublin Julian Days") to unix time stamp"""
return (djd-25567.5) * 86400.0
if __name__ == '__main__':
    # Self-test entry point: run this module's doctests directly.

    # This doctest deliberately pops 'ephem' out of sys.modules so the
    # remaining calls exercise the pure-Python fallback (weeutil) path
    # rather than the pyephem-based one.  NOTE(review): the expected
    # output assumes the America/Los_Angeles timezone set inside the
    # doctest itself.
    def dummy_no_ephem():
        """Final test that does not use ephem.
        First, get rid of 'ephem':
        >>> p = sys.modules.pop('ephem')
        Now do the rest as before:
        >>> import os
        >>> os.environ['TZ'] = 'America/Los_Angeles'
        >>> t = 1238180400
        >>> print timestamp_to_string(t)
        2009-03-27 12:00:00 PDT (1238180400)
        >>> almanac = Almanac(t, 46.0, -122.0)
        Use "_sunrise" to make sure we're getting the results from weeutil (not ephem):
        >>> print "Sunrise, sunset:", almanac._sunrise, almanac._sunset
        Sunrise, sunset: 06:56 19:30"""

    import doctest
    # These names are only referenced from inside doctest strings, hence
    # the unused-import suppression marker:
    from weeutil.weeutil import timestamp_to_string, timestamp_to_gmtime #@UnusedImport

    # testmod() returns a TestResults object; .failed is the count of
    # failing examples, so zero failures prints the success banner.
    if not doctest.testmod().failed:
        print("PASSED")
| mit | -8,862,488,356,077,799,000 | 41.974138 | 128 | 0.609729 | false | 3.551835 | true | false | false | 0.009428 |
jishnu7/silpa | src/silpa/utils/silpalogger.py | 3 | 1982 | # -*- coding: utf-8 -*-
# Copyright 2009-2010
# Vasudev Kamath <kamathvasudev@gmail.com>
# Santhosh Thottingal <santhosh.thottingal@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
'''WSGI logging and event reporting middleware.'''
import os
import logging
from logging.handlers import TimedRotatingFileHandler
import silpautils
__all__ = ['silpalogger']
conf_values = silpautils.load_configuration()
LOG_FOLDER = conf_values.get("SILPA_SITE_LOG_FILDER","../logs")
LOG_FILE = os.path.join(LOG_FOLDER,"silpa.log")
silpautils.ensure_dir(LOG_FOLDER)
BACKUPS = 10
LOG_LEVELS = {
"info":logging.INFO,
"debug":logging.DEBUG,
"warning":logging.WARNING,
"error":logging.ERROR,
"critical":logging.CRITICAL,
}
def get_logger():
'''
Funcion creates and configures new instance of logger
for the SILPA and returns it
'''
global conf_values
logger = logging.getLogger("SILPA")
logger.setLevel(LOG_LEVELS.get(conf_values.get("SILPA_LOG_LEVEL","debug"),logging.DEBUG))
log_handler = TimedRotatingFileHandler(LOG_FILE,"midnight",BACKUPS)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
return logger
silpalogger = get_logger()
| agpl-3.0 | 7,336,653,597,579,084,000 | 32.033333 | 93 | 0.727043 | false | 3.545617 | false | false | false | 0.008073 |
tommy-u/enable | kiva/fonttools/font.py | 1 | 5554 | """
Defines the Kiva Font class and a utility method to parse free-form font
specification strings into Font instances.
"""
from __future__ import absolute_import, print_function
import copy
from kiva.constants import (DEFAULT, DECORATIVE, ROMAN, SCRIPT, SWISS, MODERN,
TELETYPE, NORMAL, ITALIC, BOLD, BOLD_ITALIC)
from .font_manager import FontProperties, fontManager
# Various maps used by str_to_font
font_families = {
'default': DEFAULT,
'decorative': DECORATIVE,
'roman': ROMAN,
'script': SCRIPT,
'swiss': SWISS,
'modern': MODERN
}
font_styles = {'italic': ITALIC}
font_weights = {'bold': BOLD}
font_noise = ['pt', 'point', 'family']
def str_to_font(fontspec):
"""
Converts a string specification of a font into a Font instance.
string specifications are of the form: "modern 12", "9 roman italic",
and so on.
"""
point_size = 10
family = DEFAULT
style = NORMAL
weight = NORMAL
underline = 0
facename = []
for word in fontspec.split():
lword = word.lower()
if lword in font_families:
family = font_families[lword]
elif lword in font_styles:
style = font_styles[lword]
elif lword in font_weights:
weight = font_weights[lword]
elif lword == 'underline':
underline = 1
elif lword not in font_noise:
try:
point_size = int(lword)
except:
facename.append(word)
return Font(size=point_size, family=family, weight=weight, style=style,
underline=underline, face_name=' '.join(facename))
class Font(object):
""" Font class for device independent font specification.
It is primarily based on wxPython, but looks to be similar to
the needs of Mac OS X, etc.
The family defaults to SWISS so that font rotation will work
correctly under wxPython. Revisit as we get more platforms
defined.
"""
# Maps the constants for font families to names to use when searching for
# fonts.
familymap = {
DEFAULT: "serif",
SWISS: "sans-serif",
ROMAN: "serif",
MODERN: "sans-serif",
DECORATIVE: "fantasy",
SCRIPT: "script",
TELETYPE: "monospace"
}
def __init__(self, face_name="", size=12, family=SWISS, weight=NORMAL,
style=NORMAL, underline=0, encoding=DEFAULT):
if (type(size) != int) or (type(family) != type(SWISS)) or \
(type(weight) != type(NORMAL)) or (type(style) != type(NORMAL)) or \
(type(underline) != int) or (not isinstance(face_name, basestring)) or \
(type(encoding) != type(DEFAULT)):
raise RuntimeError("Bad value in Font() constructor.")
# HACK: C++ stuff expects a string (not unicode) for the face_name,
# so fix if needed. See ticket #2111 in the CP Trac.
if isinstance(face_name, unicode):
face_name = face_name.encode("latin1")
self.size = size
self.family = family
self.weight = weight
self.style = style
self.underline = underline
self.face_name = face_name
self.encoding = encoding
def findfont(self):
""" Returns the file name containing the font that most closely matches
our font properties.
"""
fp = self._make_font_props()
return str(fontManager.findfont(fp))
def findfontname(self):
""" Returns the name of the font that most closely matches our font
properties
"""
fp = self._make_font_props()
return fp.get_name()
def _make_font_props(self):
""" Returns a font_manager.FontProperties object that encapsulates our
font properties
"""
# XXX: change the weight to a numerical value
if self.style == BOLD or self.style == BOLD_ITALIC:
weight = "bold"
else:
weight = "normal"
if self.style == ITALIC or self.style == BOLD_ITALIC:
style = "italic"
else:
style = "normal"
fp = FontProperties(family=self.familymap[self.family], style=style,
weight=weight, size=self.size)
if self.face_name != "":
fp.set_name(self.face_name)
return fp
def _get_name(self):
return self.face_name
def _set_name(self, val):
self.face_name = val
name = property(_get_name, _set_name)
def copy(self):
""" Returns a copy of the font object."""
return copy.deepcopy(self)
def __eq__(self, other):
result = False
try:
if (self.family == other.family and
self.size == other.size and
self.weight == other.weight and
self.style == other.style and
self.underline == other.underline and
self.face_name == other.face_name and
self.encoding == other.encoding):
result = True
except AttributeError:
pass
return result
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
fmt = ("Font(size=%d,family=%d,weight=%d, style=%d, face_name='%s', " +
"encoding=%d)")
return fmt % (self.size, self.family, self.weight, self.style,
self.face_name, self.encoding)
| bsd-3-clause | 8,895,763,609,779,762,000 | 32.257485 | 88 | 0.568419 | false | 3.987078 | false | false | false | 0.00054 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/PIL/ImageCms.py | 29 | 37195 | # The Python Imaging Library.
# $Id$
# Optional color management support, based on Kevin Cazabon's PyCMS
# library.
# History:
# 2009-03-08 fl Added to PIL.
# Copyright (C) 2002-2003 Kevin Cazabon
# Copyright (c) 2009 by Fredrik Lundh
# Copyright (c) 2013 by Eric Soroos
# See the README file for information on usage and redistribution. See
# below for the original description.
from __future__ import print_function
import sys
from PIL import Image
try:
from PIL import _imagingcms
except ImportError as ex:
# Allow error import for doc purposes, but error out when accessing
# anything in core.
from _util import deferred_error
_imagingcms = deferred_error(ex)
from PIL._util import isStringType
DESCRIPTION = """
pyCMS
a Python / PIL interface to the littleCMS ICC Color Management System
Copyright (C) 2002-2003 Kevin Cazabon
kevin@cazabon.com
http://www.cazabon.com
pyCMS home page: http://www.cazabon.com/pyCMS
littleCMS home page: http://www.littlecms.com
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
Originally released under LGPL. Graciously donated to PIL in
March 2009, for distribution under the standard PIL license
The pyCMS.py module provides a "clean" interface between Python/PIL and
pyCMSdll, taking care of some of the more complex handling of the direct
pyCMSdll functions, as well as error-checking and making sure that all
relevant data is kept together.
While it is possible to call pyCMSdll functions directly, it's not highly
recommended.
Version History:
1.0.0 pil Oct 2013 Port to LCMS 2.
0.1.0 pil mod March 10, 2009
Renamed display profile to proof profile. The proof
profile is the profile of the device that is being
simulated, not the profile of the device which is
actually used to display/print the final simulation
(that'd be the output profile) - also see LCMSAPI.txt
input colorspace -> using 'renderingIntent' -> proof
colorspace -> using 'proofRenderingIntent' -> output
colorspace
Added LCMS FLAGS support.
Added FLAGS["SOFTPROOFING"] as default flag for
buildProofTransform (otherwise the proof profile/intent
would be ignored).
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
0.0.2 alpha Jan 6, 2002
Added try/except statements around type() checks of
potential CObjects... Python won't let you use type()
on them, and raises a TypeError (stupid, if you ask
me!)
Added buildProofTransformFromOpenProfiles() function.
Additional fixes in DLL, see DLL code for details.
0.0.1 alpha first public release, Dec. 26, 2002
Known to-do list with current version (of Python interface, not pyCMSdll):
none
"""
VERSION = "1.0.0 pil"
# --------------------------------------------------------------------.
core = _imagingcms
#
# intent/direction values
INTENT_PERCEPTUAL = 0
INTENT_RELATIVE_COLORIMETRIC = 1
INTENT_SATURATION = 2
INTENT_ABSOLUTE_COLORIMETRIC = 3
DIRECTION_INPUT = 0
DIRECTION_OUTPUT = 1
DIRECTION_PROOF = 2
#
# flags
FLAGS = {
"MATRIXINPUT": 1,
"MATRIXOUTPUT": 2,
"MATRIXONLY": (1 | 2),
"NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
# Don't create prelinearization tables on precalculated transforms
# (internal use):
"NOPRELINEARIZATION": 16,
"GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
"NOTCACHE": 64, # Inhibit 1-pixel cache
"NOTPRECALC": 256,
"NULLTRANSFORM": 512, # Don't transform anyway
"HIGHRESPRECALC": 1024, # Use more memory to give better accuracy
"LOWRESPRECALC": 2048, # Use less memory to minimize resources
"WHITEBLACKCOMPENSATION": 8192,
"BLACKPOINTCOMPENSATION": 8192,
"GAMUTCHECK": 4096, # Out of Gamut alarm
"SOFTPROOFING": 16384, # Do softproofing
"PRESERVEBLACK": 32768, # Black preservation
"NODEFAULTRESOURCEDEF": 16777216, # CRD special
"GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints
}
_MAX_FLAG = 0
for flag in FLAGS.values():
if isinstance(flag, int):
_MAX_FLAG = _MAX_FLAG | flag
# --------------------------------------------------------------------.
# Experimental PIL-level API
# --------------------------------------------------------------------.
##
# Profile.
class ImageCmsProfile(object):
def __init__(self, profile):
"""
:param profile: Either a string representing a filename,
a file like object containing a profile or a
low-level profile object
"""
if isStringType(profile):
self._set(core.profile_open(profile), profile)
elif hasattr(profile, "read"):
self._set(core.profile_frombytes(profile.read()))
elif isinstance(profile, _imagingcms.CmsProfile):
self._set(profile)
else:
raise TypeError("Invalid type for Profile")
def _set(self, profile, filename=None):
self.profile = profile
self.filename = filename
if profile:
self.product_name = None # profile.product_name
self.product_info = None # profile.product_info
else:
self.product_name = None
self.product_info = None
def tobytes(self):
"""
Returns the profile in a format suitable for embedding in
saved images.
:returns: a bytes object containing the ICC profile.
"""
return core.profile_tobytes(self.profile)
class ImageCmsTransform(Image.ImagePointHandler):
"""
Transform. This can be used with the procedural API, or with the standard
Image.point() method.
Will return the output profile in the output.info['icc_profile'].
"""
def __init__(self, input, output, input_mode, output_mode,
intent=INTENT_PERCEPTUAL, proof=None,
proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0):
if proof is None:
self.transform = core.buildTransform(
input.profile, output.profile,
input_mode, output_mode,
intent,
flags
)
else:
self.transform = core.buildProofTransform(
input.profile, output.profile, proof.profile,
input_mode, output_mode,
intent, proof_intent,
flags
)
# Note: inputMode and outputMode are for pyCMS compatibility only
self.input_mode = self.inputMode = input_mode
self.output_mode = self.outputMode = output_mode
self.output_profile = output
def point(self, im):
return self.apply(im)
def apply(self, im, imOut=None):
im.load()
if imOut is None:
imOut = Image.new(self.output_mode, im.size, None)
self.transform.apply(im.im.id, imOut.im.id)
imOut.info['icc_profile'] = self.output_profile.tobytes()
return imOut
def apply_in_place(self, im):
im.load()
if im.mode != self.output_mode:
raise ValueError("mode mismatch") # wrong output mode
self.transform.apply(im.im.id, im.im.id)
im.info['icc_profile'] = self.output_profile.tobytes()
return im
def get_display_profile(handle=None):
""" (experimental) Fetches the profile for the current display device.
:returns: None if the profile is not known.
"""
if sys.platform == "win32":
from PIL import ImageWin
if isinstance(handle, ImageWin.HDC):
profile = core.get_display_profile_win32(handle, 1)
else:
profile = core.get_display_profile_win32(handle or 0)
else:
try:
get = _imagingcms.get_display_profile
except AttributeError:
return None
else:
profile = get()
return ImageCmsProfile(profile)
# --------------------------------------------------------------------.
# pyCMS compatible layer
# --------------------------------------------------------------------.
class PyCMSError(Exception):
""" (pyCMS) Exception class.
This is used for all errors in the pyCMS API. """
pass
def profileToProfile(
im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL,
outputMode=None, inPlace=0, flags=0):
"""
(pyCMS) Applies an ICC transformation to a given image, mapping from
inputProfile to outputProfile.
If the input or output profiles specified are not valid filenames, a
PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode,
a PyCMSError will be raised. If an error occurs during application of
the profiles, a PyCMSError will be raised. If outputMode is not a mode
supported by the outputProfile (or by pyCMS), a PyCMSError will be
raised.
This function applies an ICC transformation to im from inputProfile's
color space to outputProfile's color space using the specified rendering
intent to decide how to handle out-of-gamut colors.
OutputMode can be used to specify that a color mode conversion is to
be done using these profiles, but the specified profiles must be able
to handle that mode. I.e., if converting im from RGB to CMYK using
profiles, the input profile must handle RGB data, and the output
profile must handle CMYK data.
:param im: An open PIL image object (i.e. Image.new(...) or
Image.open(...), etc.)
:param inputProfile: String, as a valid filename path to the ICC input
profile you wish to use for this image, or a profile object
:param outputProfile: String, as a valid filename path to the ICC output
profile you wish to use for this image, or a profile object
:param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param outputMode: A valid PIL mode for the output image (i.e. "RGB",
"CMYK", etc.). Note: if rendering the image "inPlace", outputMode
MUST be the same mode as the input, or omitted completely. If
omitted, the outputMode will be the same as the mode of the input
image (im.mode)
:param inPlace: Boolean (1 = True, None or 0 = False). If True, the
original image is modified in-place, and None is returned. If False
(default), a new Image object is returned with the transform applied.
:param flags: Integer (0-...) specifying additional flags
:returns: Either None or a new PIL image object, depending on value of
inPlace
:exception PyCMSError:
"""
if outputMode is None:
outputMode = im.mode
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError(
"flags must be an integer between 0 and %s" + _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
transform = ImageCmsTransform(
inputProfile, outputProfile, im.mode, outputMode,
renderingIntent, flags=flags
)
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
def getOpenProfile(profileFilename):
"""
(pyCMS) Opens an ICC profile file.
The PyCMSProfile object can be passed back into pyCMS for use in creating
transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
If profileFilename is not a vaild filename for an ICC profile, a PyCMSError
will be raised.
:param profileFilename: String, as a valid filename path to the ICC profile
you wish to open, or a file-like object.
:returns: A CmsProfile class object.
:exception PyCMSError:
"""
try:
return ImageCmsProfile(profileFilename)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def buildTransform(
inputProfile, outputProfile, inMode, outMode,
renderingIntent=INTENT_PERCEPTUAL, flags=0):
"""
(pyCMS) Builds an ICC transform mapping from the inputProfile to the
outputProfile. Use applyTransform to apply the transform to a given
image.
If the input or output profiles specified are not valid filenames, a
PyCMSError will be raised. If an error occurs during creation of the
transform, a PyCMSError will be raised.
If inMode or outMode are not a mode supported by the outputProfile (or
by pyCMS), a PyCMSError will be raised.
This function builds and returns an ICC transform from the inputProfile
to the outputProfile using the renderingIntent to determine what to do
with out-of-gamut colors. It will ONLY work for converting images that
are in inMode to images that are in outMode color format (PIL mode,
i.e. "RGB", "RGBA", "CMYK", etc.).
Building the transform is a fair part of the overhead in
ImageCms.profileToProfile(), so if you're planning on converting multiple
images using the same input/output settings, this can save you time.
Once you have a transform object, it can be used with
ImageCms.applyProfile() to convert images without the need to re-compute
the lookup table for the transform.
The reason pyCMS returns a class object rather than a handle directly
to the transform is that it needs to keep track of the PIL input/output
modes that the transform is meant for. These attributes are stored in
the "inMode" and "outMode" attributes of the object (which can be
manually overridden if you really want to, but I don't know of any
time that would be of use, or would even work).
:param inputProfile: String, as a valid filename path to the ICC input
profile you wish to use for this transform, or a profile object
:param outputProfile: String, as a valid filename path to the ICC output
profile you wish to use for this transform, or a profile object
:param inMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param outMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param flags: Integer (0-...) specifying additional flags
:returns: A CmsTransform class object.
:exception PyCMSError:
"""
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError(
"flags must be an integer between 0 and %s" + _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
return ImageCmsTransform(
inputProfile, outputProfile, inMode, outMode,
renderingIntent, flags=flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def buildProofTransform(
inputProfile, outputProfile, proofProfile, inMode, outMode,
renderingIntent=INTENT_PERCEPTUAL,
proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC,
flags=FLAGS["SOFTPROOFING"]):
"""
(pyCMS) Builds an ICC transform mapping from the inputProfile to the
outputProfile, but tries to simulate the result that would be
obtained on the proofProfile device.
If the input, output, or proof profiles specified are not valid
filenames, a PyCMSError will be raised.
If an error occurs during creation of the transform, a PyCMSError will
be raised.
If inMode or outMode are not a mode supported by the outputProfile
(or by pyCMS), a PyCMSError will be raised.
This function builds and returns an ICC transform from the inputProfile
to the outputProfile, but tries to simulate the result that would be
obtained on the proofProfile device using renderingIntent and
proofRenderingIntent to determine what to do with out-of-gamut
colors. This is known as "soft-proofing". It will ONLY work for
converting images that are in inMode to images that are in outMode
color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.).
Usage of the resulting transform object is exactly the same as with
ImageCms.buildTransform().
Proof profiling is generally used when using an output device to get a
good idea of what the final printed/displayed image would look like on
the proofProfile device when it's quicker and easier to use the
output device for judging color. Generally, this means that the
output device is a monitor, or a dye-sub printer (etc.), and the simulated
device is something more expensive, complicated, or time consuming
(making it difficult to make a real print for color judgement purposes).
Soft-proofing basically functions by adjusting the colors on the
output device to match the colors of the device being simulated. However,
when the simulated device has a much wider gamut than the output
device, you may obtain marginal results.
:param inputProfile: String, as a valid filename path to the ICC input
profile you wish to use for this transform, or a profile object
:param outputProfile: String, as a valid filename path to the ICC output
(monitor, usually) profile you wish to use for this transform, or a
profile object
:param proofProfile: String, as a valid filename path to the ICC proof
profile you wish to use for this transform, or a profile object
:param inMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param outMode: String, as a valid PIL mode that the appropriate profile
also supports (i.e. "RGB", "RGBA", "CMYK", etc.)
:param renderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for the input->proof (simulated) transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param proofRenderingIntent: Integer (0-3) specifying the rendering intent you
wish to use for proof->output transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and what
they do.
:param flags: Integer (0-...) specifying additional flags
:returns: A CmsTransform class object.
:exception PyCMSError:
"""
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError(
"flags must be an integer between 0 and %s" + _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
if not isinstance(proofProfile, ImageCmsProfile):
proofProfile = ImageCmsProfile(proofProfile)
return ImageCmsTransform(
inputProfile, outputProfile, inMode, outMode, renderingIntent,
proofProfile, proofRenderingIntent, flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
def applyTransform(im, transform, inPlace=0):
"""
(pyCMS) Applies a transform to a given image.
If im.mode != transform.inMode, a PyCMSError is raised.
If inPlace == TRUE and transform.inMode != transform.outMode, a
PyCMSError is raised.
If im.mode, transfer.inMode, or transfer.outMode is not supported by
pyCMSdll or the profiles you used for the transform, a PyCMSError is
raised.
If an error occurs while the transform is being applied, a PyCMSError
is raised.
This function applies a pre-calculated transform (from
ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles())
to an image. The transform can be used for multiple images, saving
considerable calculation time if doing the same conversion multiple times.
If you want to modify im in-place instead of receiving a new image as
the return value, set inPlace to TRUE. This can only be done if
transform.inMode and transform.outMode are the same, because we can't
change the mode in-place (the buffer sizes for some modes are
different). The default behavior is to return a new Image object of
the same dimensions in mode transform.outMode.
:param im: A PIL Image object, and im.mode must be the same as the inMode
supported by the transform.
:param transform: A valid CmsTransform class object
:param inPlace: Bool (1 == True, 0 or None == False). If True, im is
modified in place and None is returned, if False, a new Image object
with the transform applied is returned (and im is not changed). The
default is False.
:returns: Either None, or a new PIL Image object, depending on the value of
inPlace. The profile will be returned in the image's
info['icc_profile'].
:exception PyCMSError:
"""
try:
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
def createProfile(colorSpace, colorTemp=-1):
"""
(pyCMS) Creates a profile.
If colorSpace not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised
If using LAB and colorTemp != a positive integer, a PyCMSError is raised.
If an error occurs while creating the profile, a PyCMSError is raised.
Use this function to create common profiles on-the-fly instead of
having to supply a profile on disk and knowing the path to it. It
returns a normal CmsProfile object that can be passed to
ImageCms.buildTransformFromOpenProfiles() to create a transform to apply
to images.
:param colorSpace: String, the color space of the profile you wish to
create.
Currently only "LAB", "XYZ", and "sRGB" are supported.
:param colorTemp: Positive integer for the white point for the profile, in
degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50
illuminant if omitted (5000k). colorTemp is ONLY applied to LAB
profiles, and is ignored for XYZ and sRGB.
:returns: A CmsProfile class object
:exception PyCMSError:
"""
if colorSpace not in ["LAB", "XYZ", "sRGB"]:
raise PyCMSError(
"Color space not supported for on-the-fly profile creation (%s)"
% colorSpace)
if colorSpace == "LAB":
try:
colorTemp = float(colorTemp)
except:
raise PyCMSError(
"Color temperature must be numeric, \"%s\" not valid"
% colorTemp)
try:
return core.createProfile(colorSpace, colorTemp)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileName(profile):
"""
(pyCMS) Gets the internal product name for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised If an error occurs while trying to obtain the
name tag, a PyCMSError is raised.
Use this function to obtain the INTERNAL name of the profile (stored
in an ICC tag in the profile itself), usually the one used when the
profile was originally created. Sometimes this tag also contains
additional information supplied by the creator.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal name of the profile as stored
in an ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# do it in python, not c.
# // name was "%s - %s" (model, manufacturer) || Description ,
# // but if the Model and Manufacturer were the same or the model
# // was long, Just the model, in 1.x
model = profile.profile.product_model
manufacturer = profile.profile.product_manufacturer
if not (model or manufacturer):
return profile.profile.product_description + "\n"
if not manufacturer or len(model) > 30:
return model + "\n"
return "%s - %s\n" % (model, manufacturer)
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileInfo(profile):
"""
(pyCMS) Gets the internal product information for the given profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the info tag, a PyCMSError
is raised
Use this function to obtain the information stored in the profile's
info tag. This often contains details about the profile, and how it
was created, as supplied by the creator.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in
an ICC tag.
:exception PyCMSError:
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# add an extra newline to preserve pyCMS compatibility
# Python, not C. the white point bits weren't working well,
# so skipping.
# // info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint
description = profile.profile.product_description
cpright = profile.profile.product_copyright
arr = []
for elt in (description, cpright):
if elt:
arr.append(elt)
return "\r\n\r\n".join(arr) + "\r\n\r\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
def getProfileCopyright(profile):
    """
    (pyCMS) Gets the copyright for the given profile.
    If profile isn't a valid CmsProfile object or filename to a profile,
    a PyCMSError is raised.
    If an error occurs while trying to obtain the copyright tag, a PyCMSError
    is raised.
    Use this function to obtain the information stored in the profile's
    copyright tag.
    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """
    try:
        cms_profile = profile if isinstance(profile, ImageCmsProfile) \
            else ImageCmsProfile(profile)
        # Trailing newline kept for pyCMS compatibility.
        return cms_profile.profile.product_copyright + "\n"
    except (AttributeError, IOError, TypeError, ValueError) as v:
        raise PyCMSError(v)
def getProfileManufacturer(profile):
    """
    (pyCMS) Gets the manufacturer for the given profile.
    If profile isn't a valid CmsProfile object or filename to a profile,
    a PyCMSError is raised.
    If an error occurs while trying to obtain the manufacturer tag, a
    PyCMSError is raised.
    Use this function to obtain the information stored in the profile's
    manufacturer tag.
    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """
    try:
        cms_profile = profile if isinstance(profile, ImageCmsProfile) \
            else ImageCmsProfile(profile)
        # Trailing newline kept for pyCMS compatibility.
        return cms_profile.profile.product_manufacturer + "\n"
    except (AttributeError, IOError, TypeError, ValueError) as v:
        raise PyCMSError(v)
def getProfileModel(profile):
    """
    (pyCMS) Gets the model for the given profile.
    If profile isn't a valid CmsProfile object or filename to a profile,
    a PyCMSError is raised.
    If an error occurs while trying to obtain the model tag, a PyCMSError
    is raised.
    Use this function to obtain the information stored in the profile's
    model tag.
    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in
        an ICC tag.
    :exception PyCMSError:
    """
    try:
        cms_profile = profile if isinstance(profile, ImageCmsProfile) \
            else ImageCmsProfile(profile)
        # Trailing newline kept for pyCMS compatibility.
        return cms_profile.profile.product_model + "\n"
    except (AttributeError, IOError, TypeError, ValueError) as v:
        raise PyCMSError(v)
def getProfileDescription(profile):
    """
    (pyCMS) Gets the description for the given profile.
    If profile isn't a valid CmsProfile object or filename to a profile,
    a PyCMSError is raised.
    If an error occurs while trying to obtain the description tag, a PyCMSError
    is raised.
    Use this function to obtain the information stored in the profile's
    description tag.
    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: A string containing the internal profile information stored in an
        ICC tag.
    :exception PyCMSError:
    """
    try:
        cms_profile = profile if isinstance(profile, ImageCmsProfile) \
            else ImageCmsProfile(profile)
        # Trailing newline kept for pyCMS compatibility.
        return cms_profile.profile.product_description + "\n"
    except (AttributeError, IOError, TypeError, ValueError) as v:
        raise PyCMSError(v)
def getDefaultIntent(profile):
    """
    (pyCMS) Gets the default intent name for the given profile.
    If profile isn't a valid CmsProfile object or filename to a profile,
    a PyCMSError is raised.
    If an error occurs while trying to obtain the default intent, a
    PyCMSError is raised.
    Use this function to determine the default (and usually best optimized)
    rendering intent for this profile.  Most profiles support multiple
    rendering intents, but are intended mostly for one type of conversion.
    If you wish to use a different intent than returned, use
    ImageCms.isIntentSupported() to verify it will work first.
    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :returns: Integer 0-3 specifying the default rendering intent for this
        profile.
        INTENT_PERCEPTUAL            = 0 (DEFAULT)
        INTENT_RELATIVE_COLORIMETRIC = 1
        INTENT_SATURATION            = 2
        INTENT_ABSOLUTE_COLORIMETRIC = 3
        see the pyCMS documentation for details on rendering intents and what
        they do.
    :exception PyCMSError:
    """
    try:
        cms_profile = profile if isinstance(profile, ImageCmsProfile) \
            else ImageCmsProfile(profile)
        return cms_profile.profile.rendering_intent
    except (AttributeError, IOError, TypeError, ValueError) as v:
        raise PyCMSError(v)
def isIntentSupported(profile, intent, direction):
    """
    (pyCMS) Checks if a given intent is supported.
    Use this function to verify that you can use your desired
    renderingIntent with profile, and that profile can be used for the
    input/output/proof profile as you desire.
    Some profiles are created specifically for one "direction", and cannot
    be used for others.  Some profiles can only be used for certain
    rendering intents... so it's best to either verify this before trying
    to create a transform with them (using this function), or catch the
    potential PyCMSError that will occur if they don't support the modes
    you select.
    :param profile: EITHER a valid CmsProfile object, OR a string of the
        filename of an ICC profile.
    :param intent: Integer (0-3) specifying the rendering intent you wish to
        use with this profile
        INTENT_PERCEPTUAL            = 0 (DEFAULT)
        INTENT_RELATIVE_COLORIMETRIC = 1
        INTENT_SATURATION            = 2
        INTENT_ABSOLUTE_COLORIMETRIC = 3
        see the pyCMS documentation for details on rendering intents and what
        they do.
    :param direction: Integer specifying if the profile is to be used for input,
        output, or proof
        INPUT  = 0 (or use ImageCms.DIRECTION_INPUT)
        OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
        PROOF  = 2 (or use ImageCms.DIRECTION_PROOF)
    :returns: 1 if the intent/direction are supported, -1 if they are not.
    :exception PyCMSError:
    """
    try:
        cms_profile = profile if isinstance(profile, ImageCmsProfile) \
            else ImageCmsProfile(profile)
        # FIXME: I get different results for the same data w. different
        # compilers.  Bug in LittleCMS or in the binding?
        supported = cms_profile.profile.is_intent_supported(intent, direction)
        return 1 if supported else -1
    except (AttributeError, IOError, TypeError, ValueError) as v:
        raise PyCMSError(v)
def versions():
    """
    (pyCMS) Fetches versions.
    """
    # (pyCMS version, LittleCMS version, Python version, PIL version)
    python_version = sys.version.split()[0]
    return (VERSION, core.littlecms_version, python_version, Image.VERSION)
# --------------------------------------------------------------------

if __name__ == "__main__":
    # create a cheap manual from the __doc__ strings for the functions above

    print(__doc__)

    for f in dir(sys.modules[__name__]):
        doc = None
        try:
            # Look the attribute up directly instead of exec()-ing a formatted
            # string: same result, no dynamic code execution.
            doc = getattr(sys.modules[__name__], f).__doc__
            if "pyCMS" in doc:
                # so we don't get the __doc__ string for imported modules
                print("=" * 80)
                print("%s" % f)
                print(doc)
        except (AttributeError, TypeError):
            # Attribute without a usable docstring (e.g. doc is None) - skip.
            pass
| mit | -5,769,082,320,014,373,000 | 37.187885 | 93 | 0.65939 | false | 4.141982 | false | false | false | 0.000699 |
gianina-ingenuity/titanium-branch-deep-linking | testbed/x/mobilesdk/osx/5.5.1.GA/common/markdown/preprocessors.py | 112 | 7128 |
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
import re
import markdown
HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
class Processor:
    """Base class for Markdown processors.

    Keeps a back-reference to the owning Markdown instance when one is
    supplied at construction time.
    """
    def __init__(self, markdown_instance=None):
        # Only bind the attribute for a truthy instance, so processors
        # created without one carry no ``markdown`` attribute at all.
        if markdown_instance:
            self.markdown = markdown_instance
class Preprocessor (Processor):
    """
    Preprocessors are run after the text is broken into lines.
    Each preprocessor implements a "run" method that takes a pointer to a
    list of lines of the document, modifies it as necessary and returns
    either the same pointer or a pointer to a new list.
    Preprocessors must extend markdown.Preprocessor.
    """
    def run(self, lines):
        """
        Each subclass of Preprocessor should override the `run` method, which
        takes the document as a list of strings split by newlines and returns
        the (possibly modified) list of lines.
        """
        # Base implementation is a deliberate no-op; subclasses override.
        pass
class HtmlStash:
    """
    This class is used for stashing HTML objects that we extract
    in the beginning and replace with place-holders.
    """

    def __init__(self):
        """Create an empty HtmlStash."""
        self.html_counter = 0  # number of stashed inline html segments
        self.rawHtmlBlocks = []

    def store(self, html, safe=False):
        """
        Saves an HTML segment for later reinsertion.  Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:

        * html: an html segment
        * safe: label an html segment as safe for safemode

        Returns : a placeholder string
        """
        placeholder = HTML_PLACEHOLDER % self.html_counter
        self.rawHtmlBlocks.append((html, safe))
        self.html_counter += 1
        return placeholder

    def reset(self):
        # Forget everything stashed so far.
        self.rawHtmlBlocks = []
        self.html_counter = 0
class HtmlBlockPreprocessor(Preprocessor):
    """Remove html blocks from the text and store them for later retrieval."""

    # Closing-tag templates tried in order: "</tag>" first, then a bare "tag>".
    right_tag_patterns = ["</%s>", "%s>"]

    def _get_left_tag(self, block):
        # Name of the block's opening tag, lowercased.
        return block[1:].replace(">", " ", 1).split()[0].lower()

    def _get_right_tag(self, left_tag, block):
        # Return (closing tag name, index just past it) for the last closing
        # tag found in the block; falls back to the block's trailing text.
        for p in self.right_tag_patterns:
            tag = p % left_tag
            i = block.rfind(tag)
            if i > 2:
                return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
        return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)

    def _equal_tags(self, left_tag, right_tag):
        # Decide whether right_tag closes left_tag.
        if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
            return True
        if ("/" + left_tag) == right_tag:
            return True
        if (right_tag == "--" and left_tag == "--"):
            return True
        elif left_tag == right_tag[1:] \
            and right_tag[0] != "<":
            return True
        else:
            return False

    def _is_oneliner(self, tag):
        # Tags that are complete on their own line (no closing tag expected).
        return (tag in ['hr', 'hr/'])

    def run(self, lines):
        # State machine over blank-line-separated blocks: outside a tag we
        # classify each block; inside a tag we accumulate until it closes.
        text = "\n".join(lines)
        new_blocks = []
        text = text.split("\n\n")
        items = []
        left_tag = ''
        right_tag = ''
        in_tag = False # flag

        while text:
            block = text[0]
            if block.startswith("\n"):
                block = block[1:]
            text = text[1:]

            if block.startswith("\n"):
                block = block[1:]

            if not in_tag:
                if block.startswith("<"):
                    left_tag = self._get_left_tag(block)
                    right_tag, data_index = self._get_right_tag(left_tag, block)

                    if block[1] == "!":
                        # is a comment block
                        left_tag = "--"
                        right_tag, data_index = self._get_right_tag(left_tag, block)
                        # keep checking conditions below and maybe just append

                    if data_index < len(block) \
                        and markdown.isBlockLevel(left_tag):
                        # Trailing text after the closing tag: push it back
                        # onto the queue as its own block.
                        text.insert(0, block[data_index:])
                        block = block[:data_index]

                    if not (markdown.isBlockLevel(left_tag) \
                        or block[1] in ["!", "?", "@", "%"]):
                        # Inline-level html stays in the document untouched.
                        new_blocks.append(block)
                        continue

                    if self._is_oneliner(left_tag):
                        new_blocks.append(block.strip())
                        continue

                    if block.rstrip().endswith(">") \
                        and self._equal_tags(left_tag, right_tag):
                        # Complete block-level element: stash it whole.
                        new_blocks.append(
                            self.markdown.htmlStash.store(block.strip()))
                        continue
                    else: #if not block[1] == "!":
                        # if is block level tag and is not complete
                        if markdown.isBlockLevel(left_tag) or left_tag == "--" \
                            and not block.rstrip().endswith(">"):
                            items.append(block.strip())
                            in_tag = True
                        else:
                            new_blocks.append(
                                self.markdown.htmlStash.store(block.strip()))
                        continue

                new_blocks.append(block)

            else:
                # Inside an open tag: collect blocks until it is closed.
                items.append(block.strip())

                right_tag, data_index = self._get_right_tag(left_tag, block)

                if self._equal_tags(left_tag, right_tag):
                    # if find closing tag
                    in_tag = False
                    new_blocks.append(
                        self.markdown.htmlStash.store('\n\n'.join(items)))
                    items = []

        if items:
            # Unterminated tag at end of document: stash what we collected.
            new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
            new_blocks.append('\n')

        new_text = "\n\n".join(new_blocks)
        return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
    """Strip link-reference definitions from the text, recording them on the
    owning Markdown instance for later use."""

    RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)

    def run(self, lines):
        kept = []
        for line in lines:
            match = self.RE.match(line)
            if not match:
                kept.append(line)
                continue
            ref_id = match.group(2).strip().lower()
            title = match.group(4).strip()  # potential title
            if not title:
                # Reference without a title.
                self.markdown.references[ref_id] = (match.group(3), title)
            elif len(title) >= 2 and (
                    title[0] == title[-1] == '"'
                    or title[0] == title[-1] == "'"
                    or (title[0] == "(" and title[-1] == ")")):
                # Quoted/parenthesised title: store it without the delimiters.
                self.markdown.references[ref_id] = (match.group(3), title[1:-1])
            else:
                # Looks like a reference but the trailing text is not a valid
                # title, so keep the line in the document untouched.
                kept.append(line)
        return kept
| mit | 6,484,113,970,539,729,000 | 32.153488 | 84 | 0.483165 | false | 4.296564 | false | false | false | 0.00463 |
mskrzypkows/servo | tests/wpt/css-tests/tools/wptserve/wptserve/handlers.py | 86 | 12804 | import cgi
import json
import os
import traceback
import urllib
import urlparse
from constants import content_types
from pipes import Pipeline, template
from ranges import RangeParser
from request import Authentication
from response import MultipartContent
from utils import HTTPException
__all__ = ["file_handler", "python_script_handler",
"FunctionHandler", "handler", "json_handler",
"as_is_handler", "ErrorHandler", "BasicAuthHandler"]
def guess_content_type(path):
    """Map *path*'s file extension to a MIME type.

    Falls back to application/octet-stream for unknown extensions.
    """
    extension = os.path.splitext(path)[1].lstrip(".")
    try:
        return content_types[extension]
    except KeyError:
        return "application/octet-stream"
def filesystem_path(base_path, request, url_base="/"):
    """Map the request URL onto a filesystem path under *base_path*.

    Raises HTTPException(404) for anything that looks like an attempt to
    escape the document root.
    """
    if base_path is None:
        base_path = request.doc_root
    path = request.url_parts.path
    if path.startswith(url_base):
        path = path[len(url_base):]
    # Reject any path containing "..": crude but effective traversal guard.
    if ".." in path:
        raise HTTPException(404)
    new_path = os.path.join(base_path, path)
    # Otherwise setting path to / allows access outside the root directory
    if not new_path.startswith(base_path):
        raise HTTPException(404)
    return new_path
class DirectoryHandler(object):
    """Handler that renders a simple HTML listing of a directory."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)

    def __call__(self, request, response):
        # Only URLs ending in "/" are treated as directory requests.
        if not request.url_parts.path.endswith("/"):
            raise HTTPException(404)
        path = filesystem_path(self.base_path, request, self.url_base)
        if not os.path.isdir(path):
            raise HTTPException(404, "%s is not a directory" % path)
        response.headers = [("Content-Type", "text/html")]
        response.content = """<!doctype html>
<meta name="viewport" content="width=device-width">
<title>Directory listing for %(path)s</title>
<h1>Directory listing for %(path)s</h1>
<ul>
%(items)s
</li>
""" % {"path": cgi.escape(request.url_parts.path),
       "items": "\n".join(self.list_items(request, path))}

    def list_items(self, request, path):
        """Yield HTML <li> fragments for each entry in *path* (plus '..')."""
        # TODO: this won't actually list all routes, only the
        # ones that correspond to a real filesystem path. It's
        # not possible to list every route that will match
        # something, but it should be possible to at least list the
        # statically defined ones
        base_path = request.url_parts.path
        if not base_path.endswith("/"):
            base_path += "/"
        if base_path != "/":
            # Parent-directory link for everything except the root.
            link = urlparse.urljoin(base_path, "..")
            yield ("""<li class="dir"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": ".."})
        for item in sorted(os.listdir(path)):
            link = cgi.escape(urllib.quote(item))
            if os.path.isdir(os.path.join(path, item)):
                link += "/"
                class_ = "dir"
            else:
                class_ = "file"
            yield ("""<li class="%(class)s"><a href="%(link)s">%(name)s</a>""" %
                   {"link": link, "name": cgi.escape(item), "class": class_})
directory_handler = DirectoryHandler()
class FileHandler(object):
    """Handler that serves files from disk, with .headers sidecar files,
    Range request support and optional response pipelines."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base
        self.directory_handler = DirectoryHandler(self.base_path, self.url_base)

    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        if os.path.isdir(path):
            # Directories are delegated to the directory listing handler.
            return self.directory_handler(request, response)
        try:
            #This is probably racy with some other process trying to change the file
            file_size = os.stat(path).st_size
            response.headers.update(self.get_headers(request, path))
            if "Range" in request.headers:
                try:
                    byte_ranges = RangeParser()(request.headers['Range'], file_size)
                except HTTPException as e:
                    if e.code == 416:
                        # Unsatisfiable range: advertise the real size.
                        response.headers.set("Content-Range", "bytes */%i" % file_size)
                        raise
                    # NOTE(review): a non-416 HTTPException is swallowed here,
                    # leaving byte_ranges unbound below -- confirm intended.
            else:
                byte_ranges = None
            data = self.get_data(response, path, byte_ranges)
            response.content = data
            query = urlparse.parse_qs(request.url_parts.query)
            # Optional post-processing pipeline: ?pipe=... query parameter,
            # or an implicit "sub" pipeline for *.sub.* files.
            pipeline = None
            if "pipe" in query:
                pipeline = Pipeline(query["pipe"][-1])
            elif os.path.splitext(path)[0].endswith(".sub"):
                pipeline = Pipeline("sub")
            if pipeline is not None:
                response = pipeline(request, response)
            return response
        except (OSError, IOError):
            raise HTTPException(404)

    def get_headers(self, request, path):
        """Default headers plus any from __dir__ and per-file .headers files."""
        rv = self.default_headers(path)
        rv.extend(self.load_headers(request, os.path.join(os.path.split(path)[0], "__dir__")))
        rv.extend(self.load_headers(request, path))
        return rv

    def load_headers(self, request, path):
        """Read "<path>.sub.headers" (template-substituted) or "<path>.headers"
        and return a list of (name, value) tuples; [] when no file exists."""
        headers_path = path + ".sub.headers"
        if os.path.exists(headers_path):
            use_sub = True
        else:
            headers_path = path + ".headers"
            use_sub = False
        try:
            with open(headers_path) as headers_file:
                data = headers_file.read()
        except IOError:
            return []
        else:
            if use_sub:
                data = template(request, data)
            return [tuple(item.strip() for item in line.split(":", 1))
                    for line in data.splitlines() if line]

    def get_data(self, response, path, byte_ranges):
        """Read file content; honours byte ranges (206 with single range or
        multipart/byteranges for several)."""
        with open(path, 'rb') as f:
            if byte_ranges is None:
                return f.read()
            else:
                response.status = 206
                if len(byte_ranges) > 1:
                    parts_content_type, content = self.set_response_multipart(response,
                                                                              byte_ranges,
                                                                              f)
                    for byte_range in byte_ranges:
                        content.append_part(self.get_range_data(f, byte_range),
                                            parts_content_type,
                                            [("Content-Range", byte_range.header_value())])
                    return content
                else:
                    response.headers.set("Content-Range", byte_ranges[0].header_value())
                    return self.get_range_data(f, byte_ranges[0])

    def set_response_multipart(self, response, ranges, f):
        # Remember the per-part Content-Type before replacing the response's
        # Content-Type with the multipart container type.
        parts_content_type = response.headers.get("Content-Type")
        if parts_content_type:
            parts_content_type = parts_content_type[-1]
        else:
            parts_content_type = None
        content = MultipartContent()
        response.headers.set("Content-Type", "multipart/byteranges; boundary=%s" % content.boundary)
        return parts_content_type, content

    def get_range_data(self, f, byte_range):
        # Read exactly the [lower, upper) slice of the file.
        f.seek(byte_range.lower)
        return f.read(byte_range.upper - byte_range.lower)

    def default_headers(self, path):
        return [("Content-Type", guess_content_type(path))]
file_handler = FileHandler()
class PythonScriptHandler(object):
    """Handler that executes a Python file and dispatches to its main()."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __repr__(self):
        return "<%s base_path:%s url_base:%s>" % (self.__class__.__name__, self.base_path, self.url_base)

    def __call__(self, request, response):
        path = filesystem_path(self.base_path, request, self.url_base)
        try:
            # Run the script in its own namespace; __file__ mirrors normal
            # module execution so scripts can locate sibling resources.
            environ = {"__file__": path}
            execfile(path, environ, environ)
            if "main" in environ:
                handler = FunctionHandler(environ["main"])
                handler(request, response)
            else:
                raise HTTPException(500, "No main function in script %s" % path)
        except IOError:
            raise HTTPException(404)
python_script_handler = PythonScriptHandler()
class FunctionHandler(object):
    """Adapt a plain ``func(request, response)`` callable into a handler.

    The callable may return nothing (it filled in *response* itself), bare
    content, ``(headers, content)`` or ``(status, headers, content)``.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        try:
            result = self.func(request, response)
        except Exception:
            # Surface the handler's traceback to the client as a 500.
            raise HTTPException(500, message=traceback.format_exc())
        if result is None:
            return
        if not isinstance(result, tuple):
            # Bare content, no extra headers.
            response.content = result
            return
        if len(result) == 3:
            response.status, headers, content = result
        elif len(result) == 2:
            headers, content = result
        else:
            raise HTTPException(500)
        response.headers.update(headers)
        response.content = content
#The generic name here is so that this can be used as a decorator
def handler(func):
    """Decorator: wrap a plain function in a FunctionHandler."""
    return FunctionHandler(func)
class JsonHandler(object):
    """Handler wrapper that JSON-encodes the wrapped function's result and
    sets Content-Type/Content-Length accordingly."""

    def __init__(self, func):
        self.func = func

    def __call__(self, request, response):
        return FunctionHandler(self.handle_request)(request, response)

    def handle_request(self, request, response):
        result = self.func(request, response)
        response.headers.set("Content-Type", "application/json")
        if isinstance(result, tuple):
            # Encode only the payload (last element); status/headers pass through.
            encoded = json.dumps(result[-1])
            value = tuple(list(result[:-1]) + [encoded])
            length = len(encoded)
        else:
            value = json.dumps(result)
            length = len(value)
        response.headers.set("Content-Length", length)
        return value
def json_handler(func):
    """Decorator: wrap *func* in a JsonHandler."""
    return JsonHandler(func)
class AsIsHandler(object):
    """Serve a file's bytes verbatim; the file is expected to already
    contain a complete HTTP response."""

    def __init__(self, base_path=None, url_base="/"):
        self.base_path = base_path
        self.url_base = url_base

    def __call__(self, request, response):
        target = filesystem_path(self.base_path, request, self.url_base)
        try:
            f = open(target)
            try:
                response.writer.write_content(f.read())
            finally:
                f.close()
            response.close_connection = True
        except IOError:
            raise HTTPException(404)
as_is_handler = AsIsHandler()
class BasicAuthHandler(object):
    def __init__(self, handler, user, password):
        """
        A Basic Auth handler

        :Args:
        - handler: a secondary handler for the request after authentication is successful (example file_handler)
        - user: string of the valid user name or None if any / all credentials are allowed
        - password: string of the password required
        """
        self.user = user
        self.password = password
        self.handler = handler

    def __call__(self, request, response):
        if "authorization" not in request.headers:
            # No credentials supplied: challenge the client.
            response.status = 401
            response.headers.set("WWW-Authenticate", "Basic")
            return response
        auth = Authentication(request.headers)
        credentials_ok = self.user is None or (self.user == auth.username and
                                               self.password == auth.password)
        if not credentials_ok:
            response.set_error(403, "Invalid username or password")
            return response
        return self.handler(request, response)
basic_auth_handler = BasicAuthHandler(file_handler, None, None)
class ErrorHandler(object):
    """Handler that unconditionally responds with a fixed HTTP error status."""

    def __init__(self, status):
        self.status = status

    def __call__(self, request, response):
        # Delegate to the response object's error machinery.
        response.set_error(self.status)
class StaticHandler(object):
    def __init__(self, path, format_args, content_type, **headers):
        """Handler that reads a file from a path and substitutes some fixed data

        :param path: Path to the template file to use
        :param format_args: Dictionary of values to substitute into the template file
        :param content_type: Content type header to serve the response with
        :param headers: Extra headers to send with responses (underscores in
            names are converted to dashes)"""

        with open(path) as f:
            self.data = f.read() % format_args

        self.resp_headers = [("Content-Type", content_type)]
        for k, v in headers.iteritems():
            # Bug fix: extra headers were appended to an undefined local name
            # ('resp_headers'), raising NameError whenever any were supplied.
            self.resp_headers.append((k.replace("_", "-"), v))

        self.handler = handler(self.handle_request)

    def handle_request(self, request, response):
        # Every request gets the same pre-rendered headers and body.
        return self.resp_headers, self.data

    def __call__(self, request, response):
        rv = self.handler(request, response)
        return rv
| mpl-2.0 | -772,738,974,558,436,400 | 33.793478 | 113 | 0.568963 | false | 4.172043 | false | false | false | 0.002577 |
devopshq/crosspm | crosspm/cpm.py | 1 | 16409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
{app_name}
Usage:
crosspm download [options]
crosspm lock [DEPS] [DEPSLOCK] [options]
crosspm usedby [DEPS] [options]
crosspm pack <OUT> <SOURCE> [options]
crosspm cache [size | age | clear [hard]]
crosspm -h | --help
crosspm --version
Options:
<OUT> Output file.
<SOURCE> Source directory path.
-h, --help Show this screen.
--version Show version.
-L, --list Do not load packages and its dependencies. Just show what's found.
-v LEVEL, --verbose=LEVEL Set output verbosity: ({verb_level}) [default: ].
-l LOGFILE, --log=LOGFILE File name for log output. Log level is '{log_default}' if set when verbose doesn't.
-c FILE, --config=FILE Path to configuration file.
-o OPTIONS, --options OPTIONS Extra options.
--deps-path=FILE Path to file with dependencies [./{deps_default}]
--depslock-path=FILE Path to file with locked dependencies [./{deps_lock_default}]
--dependencies-content=CONTENT Content for dependencies.txt file
--dependencies-lock-content=CONTENT Content for dependencies.txt.lock file
--lock-on-success Save file with locked dependencies next to original one if download succeeds
--out-format=TYPE Output data format. Available formats:({out_format}) [default: {out_format_default}]
--output=FILE Output file name (required if --out_format is not stdout)
--output-template=FILE Template path, e.g. nuget.packages.config.j2 (required if --out_format=jinja)
--no-fails Ignore fails config if possible.
--recursive=VALUE Process all packages recursively to find and lock all dependencies
--prefer-local Do not search package if exist in cache
--stdout Print info and debug message to STDOUT, error to STDERR. Otherwise - all messages to STDERR
""" # noqa
import logging
import os
import shlex
import sys
import time
from docopt import docopt
from crosspm import version
from crosspm.helpers.archive import Archive
from crosspm.helpers.config import (
CROSSPM_DEPENDENCY_LOCK_FILENAME,
CROSSPM_DEPENDENCY_FILENAME,
Config,
)
from crosspm.helpers.content import DependenciesContent
from crosspm.helpers.downloader import Downloader
from crosspm.helpers.exceptions import * # noqa
from crosspm.helpers.locker import Locker
from crosspm.helpers.output import Output
from crosspm.helpers.python import get_object_from_string
from crosspm.helpers.usedby import Usedby
app_name = 'CrossPM (Cross Package Manager) version: {version} The MIT License (MIT)'.format(version=version)
def do_run(func):
    """Decorator for CrossPM command methods.

    Wraps *func* so that known CrossPM exceptions are converted into an
    ``(error_code, message)`` pair via ``self.exit`` (which may terminate
    the process), and successful calls return ``(0, result)``.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped method's name/docstring
    def wrapper(self, *args, **kwargs):
        try:
            res = func(self, *args, **kwargs)
        except CrosspmExceptionWrongArgs as e:
            # Wrong arguments: show usage before reporting the error.
            print(__doc__)
            return self.exit(e.error_code, e.msg)
        except CrosspmException as e:
            return self.exit(e.error_code, e.msg)
        except Exception as e:
            self._log.exception(e)
            return self.exit(CROSSPM_ERRORCODE_UNKNOWN_ERROR, 'Unknown error occurred!')
        return 0, res

    return wrapper
class CrossPM:
_ready = False
    def __init__(self, args=None, throw_exceptions=None, return_result=False):
        """Parse command-line arguments and select the command to execute.

        :param args: argument string, argument list, or None (sys.argv[1:])
        :param throw_exceptions: if None, legacy rule applies: exceptions are
            thrown only when return_result is falsy
        :param return_result: when truthy, run() returns data instead of
            writing formatted output
        """
        self._config = None
        self._output = None
        self._return_result = return_result

        if throw_exceptions is None:
            # legacy behavior
            if self._return_result:
                self._throw_exceptions = False
            else:
                self._throw_exceptions = True
        else:
            self._throw_exceptions = throw_exceptions
        self._log = logging.getLogger('crosspm')
        args = self.prepare_args(args)
        # Fill the module usage docstring placeholders before handing it to docopt.
        docopt_str = __doc__.format(app_name=app_name,
                                    verb_level=Config.get_verbosity_level(),
                                    log_default=Config.get_verbosity_level(0, True),
                                    deps_default=CROSSPM_DEPENDENCY_FILENAME,
                                    deps_lock_default=CROSSPM_DEPENDENCY_LOCK_FILENAME,
                                    out_format=Output.get_output_types(),
                                    out_format_default='stdout',
                                    )
        self._args = docopt(docopt_str,
                            argv=args,
                            version=version)

        # Normalize --recursive to a real boolean (docopt delivers a string).
        if self._args['--recursive']:
            recursive_str = self._args['--recursive']
            if recursive_str.lower() == 'true':
                self._args['--recursive'] = True
            elif recursive_str.lower() == 'false':
                self._args['--recursive'] = False
            else:
                raise Exception("Unknown value to --recursive: {}".format(recursive_str))

        # NOTE(review): docopt normally exits on bad input; this branch
        # presumably guards a mode where it returns the usage text -- confirm.
        if isinstance(self._args, str):
            if self._throw_exceptions:
                print(app_name)
                print(self._args)
                exit()
        self._ready = True

        # Map the selected sub-command to its implementation class.
        if self._args['download']:
            self.command_ = Downloader
        elif self._args['lock']:
            self.command_ = Locker
        elif self._args['usedby']:
            self.command_ = Usedby
        else:
            self.command_ = None
    @property
    def stdout(self):
        """
        The --stdout flag may also be taken from the CROSSPM_STDOUT
        environment variable.  Any value present in CROSSPM_STDOUT
        (even an empty string) is treated as True.
        :return: bool
        """
        # --stdout
        stdout = self._args['--stdout']
        if stdout:
            return True

        # CROSSPM_STDOUT (presence alone counts, value is ignored)
        stdout_env = os.getenv('CROSSPM_STDOUT', None)
        if stdout_env is not None:
            return True

        return False
    @staticmethod
    def prepare_args(args, windows=None):
        """
        Prepare args - add support for old interface, e.g:
        - --recursive was "flag" and for now it support True or False value
        :param args: str (shlex-split), list (used as-is) or None (sys.argv[1:])
        :param windows: override platform detection used for shlex splitting
        :return: normalized argument list
        """
        if windows is None:
            windows = "win" in sys.platform

        if isinstance(args, str):
            args = shlex.split(args, posix=not windows)
        elif isinstance(args, list):
            pass
        elif args is None:
            args = sys.argv[1:]
        else:
            raise Exception("Unknown args type: {}".format(type(args)))

        # --recursive => --recursive=True|False convert
        for position, argument in enumerate(args):
            # Normal way, skip change
            if argument.lower() in ('--recursive=true', '--recursive=false'):
                return args
            elif argument.lower() == '--recursive':
                if len(args) > position + 1 and args[position + 1].lower() in ["true", "false"]:
                    # --recursive true | false
                    return args
                else:
                    # legacy way, convert --recursive to --recursive=true
                    args[position] = "--recursive=True"
                    return args
        return args
    @do_run
    def read_config(self):
        """Build self._config and self._output from the parsed arguments."""
        _deps_path = self._args['--deps-path']
        # Pass the dependencies content directly (instead of a file path).
        if _deps_path is None and self._args['--dependencies-content'] is not None:
            _deps_path = DependenciesContent(self._args['--dependencies-content'])
        _depslock_path = self._args['--depslock-path']
        if _depslock_path is None and self._args['--dependencies-lock-content'] is not None:
            _depslock_path = DependenciesContent(self._args['--dependencies-lock-content'])
        # The 'lock' command accepts positional DEPS/DEPSLOCK overrides.
        if self._args['lock']:
            if self._args['DEPS']:
                _deps_path = self._args['DEPS']
            if self._args['DEPSLOCK']:
                _depslock_path = self._args['DEPSLOCK']
        self._config = Config(self._args['--config'], self._args['--options'], self._args['--no-fails'], _depslock_path,
                              _deps_path, self._args['--lock-on-success'],
                              self._args['--prefer-local'])
        self._output = Output(self._config.output('result', None), self._config.name_column, self._config)
def exit(self, code, msg):
self._log.critical(msg)
if self._throw_exceptions:
sys.exit(code)
else:
return code, msg
@property
def recursive(self):
if self.command_ is Downloader:
if self._args['--recursive'] is None:
recursive = True
else:
recursive = self._args['--recursive']
else:
if self._args['--recursive'] is None:
recursive = False
else:
recursive = self._args['--recursive']
return recursive
@do_run
def check_common_args(self):
if self._args['--output']:
output = self._args['--output'].strip().strip("'").strip('"')
output_abs = os.path.abspath(output)
if os.path.isdir(output_abs):
raise CrosspmExceptionWrongArgs(
'"%s" is a directory - can\'t write to it'
)
self._args['--output'] = output
@do_run
def set_logging_level(self):
level_str = self._args['--verbose'].strip().lower()
log = self._args['--log']
if log:
log = log.strip().strip("'").strip('"')
log_abs = os.path.abspath(log)
if os.path.isdir(log_abs):
raise CrosspmExceptionWrongArgs(
'"%s" is a directory - can\'t write log to it'
)
else:
log_dir = os.path.dirname(log_abs)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
else:
log_abs = None
level = Config.get_verbosity_level(level_str or 'console')
self._log.handlers = []
if level or log_abs:
self._log.setLevel(level)
format_str = '%(asctime)-19s [%(levelname)-9s] %(message)s'
if level_str == 'debug':
format_str = '%(asctime)-19s [%(levelname)-9s] %(name)-12s: %(message)s'
formatter = logging.Formatter(format_str, datefmt="%Y-%m-%d %H:%M:%S")
if level:
# legacy way - Cmake catch message from stdout and parse PACKAGE_ROOT
# So, crosspm print debug and info message to stderr for debug purpose
if not self.stdout:
sh = logging.StreamHandler(stream=sys.stderr)
sh.setLevel(level)
self._log.addHandler(sh)
# If --stdout flag enabled
else:
sh = logging.StreamHandler(stream=sys.stderr)
sh.setLevel(logging.WARNING)
self._log.addHandler(sh)
sh = logging.StreamHandler(stream=sys.stdout)
sh.setLevel(level)
self._log.addHandler(sh)
if log_abs:
if not level_str:
level = Config.get_verbosity_level(0)
fh = logging.FileHandler(filename=log_abs)
fh.setLevel(level)
fh.setFormatter(formatter)
self._log.addHandler(fh)
    def run(self):
        """Execute the selected command.

        :return: tuple ``(errorcode, message-or-result)``; errorcode 0 on success.
        """
        time_start = time.time()
        if self._ready:
            errorcode, msg = self.set_logging_level()
            self._log.info(app_name)
            errorcode, msg = self.check_common_args()
            if errorcode == 0:
                errorcode, msg = self.read_config()
                if errorcode == 0:
                    # download/lock/usedby all dispatch through self.command_.
                    if self._args['download']:
                        errorcode, msg = self.command(self.command_)
                    elif self._args['lock']:
                        errorcode, msg = self.command(self.command_)
                    elif self._args['usedby']:
                        errorcode, msg = self.command(self.command_)
                    elif self._args['pack']:
                        errorcode, msg = self.pack()
                    elif self._args['cache']:
                        errorcode, msg = self.cache()
        else:
            # Argument parsing failed earlier; report the raw docopt output.
            errorcode, msg = CROSSPM_ERRORCODE_WRONG_ARGS, self._args
        time_end = time.time()
        self._log.info('Done in %2.2f sec' % (time_end - time_start))
        return errorcode, msg
@do_run
def command(self, command_):
if self._return_result:
params = {}
else:
if self._args['--out-format'] == 'stdout':
if self._args['--output']:
raise CrosspmExceptionWrongArgs(
"unwanted argument '--output' while argument '--out-format={}'".format(
self._args['--out-format'],
))
elif not self._args['--output']:
raise CrosspmExceptionWrongArgs(
"argument '--output' required when argument '--out-format={}'".format(
self._args['--out-format'],
))
params = {
'out_format': ['--out-format', ''],
'output': ['--output', ''],
'output_template': ['--output-template', ''],
# 'out_prefix': ['--out-prefix', ''],
# 'depslock_path': ['--depslock-path', ''],
}
for k, v in params.items():
params[k] = self._args[v[0]] if v[0] in self._args else v[1]
if isinstance(params[k], str):
params[k] = params[k].strip('"').strip("'")
# try to dynamic load --output-template from python module
output_template = params['output_template']
if output_template:
# Try to load from python module
module_template = get_object_from_string(output_template)
if module_template is not None:
self._log.debug(
"Found output template path '{}' from '{}'".format(module_template, output_template))
params['output_template'] = module_template
else:
self._log.debug("Output template '{}' use like file path".format(output_template))
# check template exist
output_template = params['output_template']
if output_template and not os.path.exists(output_template):
raise CrosspmException(CROSSPM_ERRORCODE_CONFIG_NOT_FOUND,
"Can not find template '{}'".format(output_template))
do_load = not self._args['--list']
# hack for Locker
if command_ is Locker:
do_load = self.recursive
cpm_ = command_(self._config, do_load, self.recursive)
cpm_.entrypoint()
if self._return_result:
return self._return(cpm_)
else:
# self._output.write(params, packages)
self._output.write_output(params, cpm_.get_tree_packages())
return ''
def _return(self, cpm_downloader):
if str(self._return_result).lower() == 'raw':
return cpm_downloader.get_raw_packages()
if str(self._return_result).lower() == 'tree':
return cpm_downloader.get_tree_packages()
else:
return self._output.output_type_module(cpm_downloader.get_tree_packages())
@do_run
def pack(self):
Archive.create(self._args['<OUT>'], self._args['<SOURCE>'])
@do_run
def cache(self):
if self._args['clear']:
self._config.cache.clear(self._args['hard'])
elif self._args['size']:
self._config.cache.size()
elif self._args['age']:
self._config.cache.age()
else:
self._config.cache.info()
if __name__ == '__main__':
app = CrossPM()
app.run()
| mit | -9,117,494,264,452,563,000 | 38.105516 | 132 | 0.526277 | false | 4.292445 | true | false | false | 0.001349 |
tectronics/agpy | AG_fft_tools/upsample.py | 6 | 5095 | import fast_ffts
import warnings
import numpy as np
import shift
def dftups(inp,nor=None,noc=None,usfac=1,roff=0,coff=0):
"""
*translated from matlab*
http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation/content/html/efficient_subpixel_registration.html
Upsampled DFT by matrix multiplies, can compute an upsampled DFT in just
a small region.
usfac Upsampling factor (default usfac = 1)
[nor,noc] Number of pixels in the output upsampled DFT, in
units of upsampled pixels (default = size(in))
roff, coff Row and column offsets, allow to shift the output array to
a region of interest on the DFT (default = 0)
Recieves DC in upper left corner, image center must be in (1,1)
Manuel Guizar - Dec 13, 2007
Modified from dftus, by J.R. Fienup 7/31/06
This code is intended to provide the same result as if the following
operations were performed
- Embed the array "in" in an array that is usfac times larger in each
dimension. ifftshift to bring the center of the image to (1,1).
- Take the FFT of the larger array
- Extract an [nor, noc] region of the result. Starting with the
[roff+1 coff+1] element.
It achieves this result by computing the DFT in the output array without
the need to zeropad. Much faster and memory efficient than the
zero-padded FFT approach if [nor noc] are much smaller than [nr*usfac nc*usfac]
"""
# this function is translated from matlab, so I'm just going to pretend
# it is matlab/pylab
from numpy.fft import ifftshift
from numpy import pi,newaxis,floor
nr,nc=np.shape(inp);
# Set defaults
if noc is None: noc=nc;
if nor is None: nor=nr;
# Compute kernels and obtain DFT by matrix products
kernc=np.exp((-1j*2*pi/(nc*usfac))*( ifftshift(np.arange(nc) - floor(nc/2)).T[:,newaxis] )*( np.arange(noc) - coff )[newaxis,:]);
kernr=np.exp((-1j*2*pi/(nr*usfac))*( np.arange(nor).T - roff )[:,newaxis]*( ifftshift(np.arange(nr)) - floor(nr/2) )[newaxis,:]);
#kernc=exp((-i*2*pi/(nc*usfac))*( ifftshift([0:nc-1]).' - floor(nc/2) )*( [0:noc-1] - coff ));
#kernr=exp((-i*2*pi/(nr*usfac))*( [0:nor-1].' - roff )*( ifftshift([0:nr-1]) - floor(nr/2) ));
out=np.dot(np.dot(kernr,inp),kernc);
#return np.roll(np.roll(out,-1,axis=0),-1,axis=1)
return out
def upsample_image(image, upsample_factor=1, output_size=None, nthreads=1, use_numpy_fft=False,
xshift=0, yshift=0):
"""
Use dftups to upsample an image (but takes an image and returns an image with all reals)
"""
fftn,ifftn = fast_ffts.get_ffts(nthreads=nthreads, use_numpy_fft=use_numpy_fft)
imfft = ifftn(image)
if output_size is None:
s1 = image.shape[0]*upsample_factor
s2 = image.shape[1]*upsample_factor
elif hasattr(output_size,'__len__'):
s1 = output_size[0]
s2 = output_size[1]
else:
s1 = output_size
s2 = output_size
ups = dftups(imfft, s1, s2, upsample_factor, roff=yshift, coff=xshift)
return np.abs(ups)
def dftups1d(inp,nor=None,usfac=1,roff=0):
"""
1D upsampling... not exactly dft becuase I still don't understand it =(
"""
# this function is translated from matlab, so I'm just going to pretend
# it is matlab/pylab
from numpy.fft import ifftshift
#from numpy import pi,newaxis,floor
from scipy.signal import resample
nr=np.size(inp);
newsize = nr * usfac
#shifted = shift(inp, roff, mode='wrap')
shifted = shift.shift1d(inp,roff)
ups = resample(shifted.astype('float'),newsize)
lolim = nr/2-nr/2
uplim = nr/2+nr/2
# I think it would always have to be wrong on the upper side
if uplim-lolim > nr:
uplim -= 1
elif uplim-lolim < nr:
uplim += 1
if uplim - lolim != nr: raise ValueError('impossible?')
out = ups[lolim:uplim]
#oldx = np.arange(nr)
#newx = np.linspace(nr/2.-nr/2./usfac+roff/usfac,nr/2.+nr/2./usfac+roff/usfac,nr)
#oldx = np.linspace(0,1,nr)
#newx = np.linspace(0,1,newsize)
#inshift = shift.shift1d(inp,roff)
#out = ups = np.interp(newx,oldx,np.real(inp))
#lolim = newsize/2+roff*usfac-nr/2
#uplim = newsize/2+roff*usfac+nr/2
#out = ups[lolim:uplim]
# Set defaults
#if nor is None: nor=nr;
# Compute kernels and obtain DFT by matrix products
#kernc=np.exp((-1j*2*pi/(nc*usfac))*( ifftshift(np.arange(nc) - floor(nc/2)).T[:,newaxis] )*( np.arange(noc) - coff )[newaxis,:]);
#kernr=np.exp((-1j*2*pi/(nr*usfac))*( np.arange(nor).T - roff )[:,newaxis]*( ifftshift(np.arange(nr)) - floor(nr/2) )[newaxis,:]);
#kernc=np.ones(nr,dtype='float')/float(nr)
#kernc=exp((-i*2*pi/(nc*usfac))*( ifftshift([0:nc-1]).' - floor(nc/2) )*( [0:noc-1] - coff ));
#kernr=exp((-i*2*pi/(nr*usfac))*( [0:nor-1].' - roff )*( ifftshift([0:nr-1]) - floor(nr/2) ));
#out=np.dot(kernr,inp)
#return np.roll(np.roll(out,-1,axis=0),-1,axis=1)
return out
| mit | -1,773,074,694,105,784,800 | 40.762295 | 170 | 0.637291 | false | 2.87366 | false | false | false | 0.017861 |
pfmoore/invoke | tests/loader.py | 1 | 2601 | import imp
import os
import sys
from spec import Spec, eq_, raises
from invoke.loader import Loader, FilesystemLoader as FSLoader
from invoke.collection import Collection
from invoke.exceptions import CollectionNotFound
from _util import support
class _BasicLoader(Loader):
"""
Tests top level Loader behavior with basic finder stub.
Used when we want to make sure we're testing Loader.load and not e.g.
FilesystemLoader's specific implementation.
"""
def find(self, name):
self.fd, self.path, self.desc = t = imp.find_module(name, [support])
return t
class Loader_(Spec):
def adds_module_parent_dir_to_sys_path(self):
# Crummy doesn't-explode test.
_BasicLoader().load('namespacing')
def doesnt_dupliate_parent_dir_addition(self):
_BasicLoader().load('namespacing')
_BasicLoader().load('namespacing')
# If the bug is present, this will be 2 at least (and often more, since
# other tests will pollute it (!).
eq_(sys.path.count(support), 1)
def closes_opened_file_object(self):
loader = _BasicLoader()
loader.load('foo')
assert loader.fd.closed
def can_load_package(self):
loader = _BasicLoader()
# make sure it doesn't explode
loader.load('package')
class FilesystemLoader_(Spec):
def setup(self):
self.l = FSLoader(start=support)
def exposes_discovery_start_point(self):
start = '/tmp/'
eq_(FSLoader(start=start).start, start)
def has_a_default_discovery_start_point(self):
eq_(FSLoader().start, os.getcwd())
def returns_collection_object_if_name_found(self):
result = self.l.load('foo')
eq_(type(result), Collection)
@raises(CollectionNotFound)
def raises_CollectionNotFound_if_not_found(self):
self.l.load('nope')
@raises(ImportError)
def raises_ImportError_if_found_collection_cannot_be_imported(self):
# Instead of masking with a CollectionNotFound
self.l.load('oops')
def searches_towards_root_of_filesystem(self):
# Loaded while root is in same dir as .py
directly = self.l.load('foo')
# Loaded while root is multiple dirs deeper than the .py
deep = os.path.join(support, 'ignoreme', 'ignoremetoo')
indirectly = FSLoader(start=deep).load('foo')
eq_(directly, indirectly)
def defaults_to_tasks_collection(self):
"defaults to 'tasks' collection"
result = FSLoader(start=support + '/implicit/').load()
eq_(type(result), Collection)
| bsd-2-clause | 5,900,820,417,299,214,000 | 29.964286 | 79 | 0.653595 | false | 3.808199 | false | false | false | 0.000384 |
wanghaven/readthedocs.org | readthedocs/rtd_tests/tests/test_urls.py | 32 | 3527 | from django.core.urlresolvers import reverse
from django.core.urlresolvers import NoReverseMatch
from django.test import TestCase
from readthedocs.builds.constants import LATEST
import readthedocs.core.views
class SubdomainUrlTests(TestCase):
def test_sub_index(self):
url = reverse(readthedocs.core.views.redirect_project_slug,
urlconf='readthedocs.core.subdomain_urls')
self.assertEqual(url, '/')
def test_sub_lang_version(self):
url = reverse('docs_detail', urlconf='readthedocs.core.subdomain_urls',
kwargs={'lang_slug': 'en', 'version_slug': LATEST})
self.assertEqual(url, '/en/latest/')
def test_sub_lang_version_filename(self):
url = reverse('docs_detail', urlconf='readthedocs.core.subdomain_urls',
args=['en', 'latest', 'index.html'])
self.assertEqual(url, '/en/latest/index.html')
def test_sub_project_full_path(self):
url = reverse('subproject_docs_detail',
urlconf='readthedocs.core.subdomain_urls',
kwargs={'project_slug':'pyramid', 'lang_slug': 'en',
'version_slug': LATEST, 'filename': 'index.html'})
self.assertEqual(url, '/projects/pyramid/en/latest/index.html')
def test_sub_project_slug_only(self):
url = reverse('subproject_docs_detail',
urlconf='readthedocs.core.subdomain_urls',
kwargs={'project_slug': 'pyramid'})
self.assertEqual(url, '/projects/pyramid')
def test_sub_page(self):
url = reverse('docs_detail',
urlconf='readthedocs.core.subdomain_urls',
kwargs={'filename': 'install.html'})
self.assertEqual(url, '/page/install.html')
def test_sub_version(self):
url = reverse('version_subdomain_handler',
urlconf='readthedocs.core.subdomain_urls',
kwargs={'version_slug': '1.4.1'})
self.assertEqual(url, '/1.4.1/')
def test_sub_lang(self):
url = reverse('lang_subdomain_handler',
urlconf='readthedocs.core.subdomain_urls',
kwargs={'lang_slug': 'en'})
self.assertEqual(url, '/en/')
class WipeUrlTests(TestCase):
def test_wipe_no_params(self):
try:
reverse('wipe_version')
self.fail('reverse with no parameters should fail')
except NoReverseMatch:
pass
def test_wipe_alphabetic(self):
project_slug = 'alphabetic'
version = 'version'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alphabetic/version/')
def test_wipe_alphanumeric(self):
project_slug = 'alpha123'
version = '123alpha'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha123/123alpha/')
def test_wipe_underscore_hyphen(self):
project_slug = 'alpha_123'
version = '123-alpha'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha_123/123-alpha/')
def test_wipe_version_dot(self):
project_slug = 'alpha-123'
version = '1.2.3'
url = reverse('wipe_version', args=[project_slug, version])
self.assertEqual(url, '/wipe/alpha-123/1.2.3/')
def test_wipe_version_start_dot(self):
project_slug = 'alpha-123'
version = '.2.3'
try:
reverse('wipe_version', args=[project_slug, version])
except NoReverseMatch:
pass
| mit | -5,207,685,566,183,546,000 | 35.739583 | 79 | 0.617238 | false | 3.821235 | true | false | false | 0.004253 |
gpfreitas/bokeh | bokeh/util/notebook.py | 2 | 2533 | """ Functions useful for loading Bokeh code and data in IPython notebooks.
"""
from __future__ import absolute_import
_notebook_loaded = None
def load_notebook(resources=None, verbose=False, hide_banner=False):
""" Prepare the IPython notebook for displaying Bokeh plots.
Args:
resources (Resource, optional) :
how and where to load BokehJS from
verbose (bool, optional) :
whether to report detailed settings (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
.. warning::
Clearing the output cell containing the published BokehJS
resources HTML code may cause Bokeh CSS styling to be removed.
Returns:
None
"""
global _notebook_loaded
from .. import __version__
from ..resources import INLINE
from ..templates import NOTEBOOK_LOAD
if resources is None:
resources = INLINE
if resources.mode == 'inline':
js_info = 'inline'
css_info = 'inline'
else:
js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files
css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files
warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn']
if _notebook_loaded and verbose:
warnings.append('Warning: BokehJS previously loaded')
_notebook_loaded = resources
html = NOTEBOOK_LOAD.render(
bokeh_js=resources.render_js(),
bokeh_css=resources.render_css(),
logo_url=resources.logo_url,
verbose=verbose,
js_info=js_info,
css_info=css_info,
bokeh_version=__version__,
warnings=warnings,
hide_banner=hide_banner,
)
publish_display_data({'text/html': html})
def publish_display_data(data, source='bokeh'):
""" Compatibility wrapper for IPython ``publish_display_data``
Later versions of IPython remove the ``source`` (first) argument. This
function insulates Bokeh library code from this change.
Args:
source (str, optional) : the source arg for IPython (default: "bokeh")
data (dict) : the data dict to pass to ``publish_display_data``
Typically has the form ``{'text/html': html}``
"""
import IPython.core.displaypub as displaypub
try:
displaypub.publish_display_data(source, data)
except TypeError:
displaypub.publish_display_data(data)
| bsd-3-clause | -5,252,546,689,417,840,000 | 29.518072 | 99 | 0.645085 | false | 4.112013 | false | false | false | 0.001184 |
bnaul/scikit-learn | examples/manifold/plot_compare_methods.py | 13 | 2823 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`sphx_glr_auto_examples_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space, unlike other
manifold-learning algorithms, it does not seeks an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from collections import OrderedDict
from functools import partial
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
# Create figure
fig = plt.figure(figsize=(15, 8))
fig.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
# Add 3d scatter plot
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
# Set-up manifold methods
LLE = partial(manifold.LocallyLinearEmbedding,
n_neighbors, n_components, eigen_solver='auto')
methods = OrderedDict()
methods['LLE'] = LLE(method='standard')
methods['LTSA'] = LLE(method='ltsa')
methods['Hessian LLE'] = LLE(method='hessian')
methods['Modified LLE'] = LLE(method='modified')
methods['Isomap'] = manifold.Isomap(n_neighbors, n_components)
methods['MDS'] = manifold.MDS(n_components, max_iter=100, n_init=1)
methods['SE'] = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
methods['t-SNE'] = manifold.TSNE(n_components=n_components, init='pca',
random_state=0)
# Plot results
for i, (label, method) in enumerate(methods.items()):
t0 = time()
Y = method.fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (label, t1 - t0))
ax = fig.add_subplot(2, 5, 2 + i + (i > 3))
ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
ax.set_title("%s (%.2g sec)" % (label, t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
plt.show()
| bsd-3-clause | -5,612,965,698,459,852,000 | 33.012048 | 82 | 0.679419 | false | 3.317274 | false | false | false | 0.002834 |
spark0001/spark2.1.1 | examples/src/main/python/ml/stopwords_remover_example.py | 123 | 1434 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import StopWordsRemover
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("StopWordsRemoverExample")\
.getOrCreate()
# $example on$
sentenceData = spark.createDataFrame([
(0, ["I", "saw", "the", "red", "balloon"]),
(1, ["Mary", "had", "a", "little", "lamb"])
], ["id", "raw"])
remover = StopWordsRemover(inputCol="raw", outputCol="filtered")
remover.transform(sentenceData).show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 | 4,196,657,525,310,785,000 | 33.97561 | 74 | 0.698745 | false | 3.865229 | false | false | false | 0 |
ndardenne/pymatgen | pymatgen/io/abinit/scheduler_error_handlers.py | 11 | 3632 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
Error handlers for errors originating from the Submission systems.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
from pymatgen.io.abinit.scheduler_error_parsers import get_parser
try:
from custodian.custodian import ErrorHandler
except ImportError:
ErrorHandler = object
# TODO (from SP): Pls move this somewhere else. Custodian and Workflow stuff
# really shouldn't be in pymatgen.
class SchedulerErrorHandler(ErrorHandler):
"""
Custodian error handler for scheduler related errors
scheduler_adapter takes the scheduler, it should at least provide a .name attribute indentifying the scheduler,
currently 'slurm' is supported.
If the scheduler adapter also provides the methods defined in CorrectorProtocolScheduler, problems can also be
fixed by .apply_corrections.
If a application_adapter is also provided and it provides the methods defined in CorrectorProtocolApplication
problems can also be fixed a the level of the application, e.g. making the application require less memory.
"""
def __init__(self, scheduler_adapter, application_adapter=None, err_file='queue.err', out_file='queue.out',
run_err_file='run.err', batch_err_file='batch.err'):
self.scheduler_adapter = scheduler_adapter
self.application_adapter = application_adapter
self.err_file = err_file
self.out_file = out_file
self.run_err_file = run_err_file
self.batch_err_file = batch_err_file
self.errors = []
self.corrections = {}
def check(self):
"""
Check for the defined errors, put all found errors in self.errors, return True if any were found False if no
errors were found
"""
parser = get_parser(self.scheduler_adapter.name, err_file=self.err_file, out_file=self.out_file,
run_err_file=self.run_err_file, batch_err_file=self.batch_err_file)
parser.parse()
self.errors = parser.errors
if len(self.errors) == 0:
return False
else:
return True
def correct(self):
"""
For custodian compatibility
"""
self.return_corrections()
def return_corrections(self):
for error in self.errors:
self.corrections.update({error: {'scheduler_adapter_solutions': [], 'aplication_adapter_solutions': []}})
self.corrections[error]['scheduler_adapter_solutions'].append(error.scheduler_adapter_solutions)
self.corrections[error]['application_adapter_solutions'].append(error.application_adapter_solutions)
return self.corrections
def apply_corrections(self):
"""
Method to directly apply the corrections.
"""
for error in self.errors:
for solution in error.scheduler_adapter_solutions:
if self.scheduler_adapter is not None:
if self.scheduler_adapter.__getattribut__(solution[0].__name__)(solution[1]):
return True
for solution in error.application_adapter_solutions:
if self.application_adapter is not None:
if self.application_adapter.__getattribut__(solution[0].__name__)(solution[1]):
return True
return False
| mit | -7,173,307,719,900,468,000 | 39.808989 | 117 | 0.652808 | false | 4.208575 | false | false | false | 0.003579 |
EclecticIQ/OpenTAXII | opentaxii/taxii/services/collection_management.py | 2 | 2914 | from libtaxii.constants import (
SVC_COLLECTION_MANAGEMENT,
MSG_COLLECTION_INFORMATION_REQUEST, MSG_FEED_INFORMATION_REQUEST,
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST,
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST,
)
from .abstract import TAXIIService
from .handlers import (
CollectionInformationRequestHandler,
SubscriptionRequestHandler
)
class CollectionManagementService(TAXIIService):
handlers = {
MSG_COLLECTION_INFORMATION_REQUEST:
CollectionInformationRequestHandler,
MSG_FEED_INFORMATION_REQUEST:
CollectionInformationRequestHandler,
}
subscription_handlers = {
MSG_MANAGE_COLLECTION_SUBSCRIPTION_REQUEST:
SubscriptionRequestHandler,
MSG_MANAGE_FEED_SUBSCRIPTION_REQUEST:
SubscriptionRequestHandler
}
service_type = SVC_COLLECTION_MANAGEMENT
subscription_message = "Default subscription message"
subscription_supported = True
def __init__(self, subscription_supported=True, subscription_message=None,
**kwargs):
super(CollectionManagementService, self).__init__(**kwargs)
self.subscription_message = subscription_message
self.subscription_supported = subscription_supported
if self.subscription_supported:
self.handlers = dict(CollectionManagementService.handlers)
self.handlers.update(
CollectionManagementService.subscription_handlers)
@property
def advertised_collections(self):
return self.server.persistence.get_collections(self.id)
def get_collection(self, name):
return self.server.persistence.get_collection(name, self.id)
def get_push_methods(self, collection):
# Push delivery is not implemented
pass
def get_polling_services(self, collection):
return self.server.get_services_for_collection(collection, 'poll')
def get_subscription_services(self, collection):
services = []
all_services = self.server.get_services_for_collection(
collection, 'collection_management')
for s in all_services:
if s.subscription_supported:
services.append(s)
return services
def create_subscription(self, subscription):
subscription.subscription_id = self.generate_id()
return self.server.persistence.create_subscription(subscription)
def get_subscription(self, subscription_id):
return self.server.persistence.get_subscription(subscription_id)
def get_subscriptions(self):
return self.server.persistence.get_subscriptions(service_id=self.id)
def update_subscription(self, subscription):
return self.server.persistence.update_subscription(subscription)
def get_receiving_inbox_services(self, collection):
return self.server.get_services_for_collection(collection, 'inbox')
| bsd-3-clause | -2,920,765,497,369,848,300 | 33.690476 | 78 | 0.703844 | false | 4.625397 | false | false | false | 0 |
sbbm/llvm | test/CodeGen/SystemZ/Large/branch-range-11.py | 13 | 4057 | # Test 32-bit COMPARE LOGICAL IMMEDIATE AND BRANCH in cases where the sheer
# number of instructions causes some branches to be out of range.
# RUN: python %s | llc -mtriple=s390x-linux-gnu | FileCheck %s
# Construct:
#
# before0:
# conditional branch to after0
# ...
# beforeN:
# conditional branch to after0
# main:
# 0xffc6 bytes, from MVIY instructions
# conditional branch to main
# after0:
# ...
# conditional branch to main
# afterN:
#
# Each conditional branch sequence occupies 14 bytes if it uses a short
# branch and 20 if it uses a long one. The ones before "main:" have to
# take the branch length into account, which is 6 for short branches,
# so the final (0x3a - 6) / 14 == 3 blocks can use short branches.
# The ones after "main:" do not, so the first 0x3a / 14 == 4 blocks
# can use short branches. The conservative algorithm we use makes
# one of the forward branches unnecessarily long, as noted in the
# check output below.
#
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 50
# CHECK: jgl [[LABEL:\.L[^ ]*]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 51
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 52
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 53
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 54
# CHECK: jgl [[LABEL]]
# ...as mentioned above, the next one could be a CLIJL instead...
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 55
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 56, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 57, [[LABEL]]
# ...main goes here...
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 100, [[LABEL:\.L[^ ]*]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 101, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 102, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clijl [[REG]], 103, [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 104
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 105
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 106
# CHECK: jgl [[LABEL]]
# CHECK: l [[REG:%r[0-5]]], 0(%r3)
# CHECK: s [[REG]], 0(%r4)
# CHECK: clfi [[REG]], 107
# CHECK: jgl [[LABEL]]
branch_blocks = 8
main_size = 0xffc6
print 'define void @f1(i8 *%base, i32 *%stopa, i32 *%stopb) {'
print 'entry:'
print ' br label %before0'
print ''
for i in xrange(branch_blocks):
next = 'before%d' % (i + 1) if i + 1 < branch_blocks else 'main'
print 'before%d:' % i
print ' %%bcur%da = load i32 *%%stopa' % i
print ' %%bcur%db = load i32 *%%stopb' % i
print ' %%bsub%d = sub i32 %%bcur%da, %%bcur%db' % (i, i, i)
print ' %%btest%d = icmp ult i32 %%bsub%d, %d' % (i, i, i + 50)
print ' br i1 %%btest%d, label %%after0, label %%%s' % (i, next)
print ''
print '%s:' % next
a, b = 1, 1
for i in xrange(0, main_size, 6):
a, b = b, a + b
offset = 4096 + b % 500000
value = a % 256
print ' %%ptr%d = getelementptr i8 *%%base, i64 %d' % (i, offset)
print ' store volatile i8 %d, i8 *%%ptr%d' % (value, i)
for i in xrange(branch_blocks):
print ' %%acur%da = load i32 *%%stopa' % i
print ' %%acur%db = load i32 *%%stopb' % i
print ' %%asub%d = sub i32 %%acur%da, %%acur%db' % (i, i, i)
print ' %%atest%d = icmp ult i32 %%asub%d, %d' % (i, i, i + 100)
print ' br i1 %%atest%d, label %%main, label %%after%d' % (i, i)
print ''
print 'after%d:' % i
print ' ret void'
print '}'
| gpl-3.0 | -4,128,124,639,201,441,000 | 30.944882 | 75 | 0.555829 | false | 2.466261 | false | false | false | 0 |
gautamkmr/incubator-mxnet | example/image-classification/symbols/inception-v4.py | 57 | 8706 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding:utf-8 -*-
# Module authorship metadata.
__author__ = 'zhangshuai'
modified_date = '16/7/5'
__modify__ = 'anchengwu'
# NOTE(review): modified_date is assigned twice; only this second value survives.
modified_date = '17/2/22'
'''
Inception v4, suitable for images of around 299 x 299.

Reference:
    Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning
    Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke
    arXiv:1602.07261
'''
import mxnet as mx
import numpy as np
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=''):
    """Convolution -> BatchNorm -> ReLU building block.

    Args:
        data: input symbol.
        num_filter: number of convolution output channels.
        kernel, stride, pad: convolution geometry (tuples of two ints).
        name, suffix: concatenated into the layer-name prefix
            '<name><suffix>_{conv2d,batchnorm,relu}'.

    Returns:
        The activation symbol produced after the ReLU.
    """
    # The convolution carries no bias: BatchNorm's beta makes it redundant.
    out = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel,
                             stride=stride, pad=pad, no_bias=True,
                             name='%s%s_conv2d' % (name, suffix))
    out = mx.sym.BatchNorm(data=out, fix_gamma=True,
                           name='%s%s_batchnorm' % (name, suffix))
    out = mx.sym.Activation(data=out, act_type='relu',
                            name='%s%s_relu' % (name, suffix))
    return out
def Inception_stem(data, name= None):
c = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name='%s_conv1_3*3' %name)
c = Conv(c, 32, kernel=(3, 3), name='%s_conv2_3*3' %name)
c = Conv(c, 64, kernel=(3, 3), pad=(1, 1), name='%s_conv3_3*3' %name)
p1 = mx.sym.Pooling(c, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' %name)
c2 = Conv(c, 96, kernel=(3, 3), stride=(2, 2), name='%s_conv4_3*3' %name)
concat = mx.sym.Concat(*[p1, c2], name='%s_concat_1' %name)
c1 = Conv(concat, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv5_1*1' %name)
c1 = Conv(c1, 96, kernel=(3, 3), name='%s_conv6_3*3' %name)
c2 = Conv(concat, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv7_1*1' %name)
c2 = Conv(c2, 64, kernel=(7, 1), pad=(3, 0), name='%s_conv8_7*1' %name)
c2 = Conv(c2, 64, kernel=(1, 7), pad=(0, 3), name='%s_conv9_1*7' %name)
c2 = Conv(c2, 96, kernel=(3, 3), pad=(0, 0), name='%s_conv10_3*3' %name)
concat = mx.sym.Concat(*[c1, c2], name='%s_concat_2' %name)
c1 = Conv(concat, 192, kernel=(3, 3), stride=(2, 2), name='%s_conv11_3*3' %name)
p1 = mx.sym.Pooling(concat, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_2' %name)
concat = mx.sym.Concat(*[c1, p1], name='%s_concat_3' %name)
return concat
def InceptionA(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' %name)
c1 = Conv(p1, 96, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(input, 96, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(input, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3 = Conv(c3, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv4_3*3' %name)
c4 = Conv(input, 64, kernel=(1, 1), pad=(0, 0), name='%s_conv5_1*1' % name)
c4 = Conv(c4, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv6_3*3' % name)
c4 = Conv(c4, 96, kernel=(3, 3), pad=(1, 1), name='%s_conv7_3*3' %name)
concat = mx.sym.Concat(*[c1, c2, c3, c4], name='%s_concat_1' %name)
return concat
def ReductionA(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' %name)
c2 = Conv(input, 384, kernel=(3, 3), stride=(2, 2), name='%s_conv1_3*3' %name)
c3 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(c3, 224, kernel=(3, 3), pad=(1, 1), name='%s_conv3_3*3' %name)
c3 = Conv(c3, 256, kernel=(3, 3), stride=(2, 2), pad=(0, 0), name='%s_conv4_3*3' %name)
concat = mx.sym.Concat(*[p1, c2, c3], name='%s_concat_1' %name)
return concat
def InceptionB(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' %name)
c1 = Conv(p1, 128, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3 = Conv(c3, 224, kernel=(1, 7), pad=(0, 3), name='%s_conv4_1*7' %name)
#paper wrong
c3 = Conv(c3, 256, kernel=(7, 1), pad=(3, 0), name='%s_conv5_1*7' %name)
c4 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv6_1*1' %name)
c4 = Conv(c4, 192, kernel=(1, 7), pad=(0, 3), name='%s_conv7_1*7' %name)
c4 = Conv(c4, 224, kernel=(7, 1), pad=(3, 0), name='%s_conv8_7*1' %name)
c4 = Conv(c4, 224, kernel=(1, 7), pad=(0, 3), name='%s_conv9_1*7' %name)
c4 = Conv(c4, 256, kernel=(7, 1), pad=(3, 0), name='%s_conv10_7*1' %name)
concat = mx.sym.Concat(*[c1, c2, c3, c4], name='%s_concat_1' %name)
return concat
def ReductionB(input,name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), stride=(2, 2), pool_type='max', name='%s_maxpool_1' %name)
c2 = Conv(input, 192, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(c2, 192, kernel=(3, 3), stride=(2, 2), name='%s_conv2_3*3' %name)
c3 = Conv(input, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3 = Conv(c3, 256, kernel=(1, 7), pad=(0, 3), name='%s_conv4_1*7' %name)
c3 = Conv(c3, 320, kernel=(7, 1), pad=(3, 0), name='%s_conv5_7*1' %name)
c3 = Conv(c3, 320, kernel=(3, 3), stride=(2, 2), name='%s_conv6_3*3' %name)
concat = mx.sym.Concat(*[p1, c2, c3], name='%s_concat_1' %name)
return concat
def InceptionC(input, name=None):
p1 = mx.sym.Pooling(input, kernel=(3, 3), pad=(1, 1), pool_type='avg', name='%s_avgpool_1' %name)
c1 = Conv(p1, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv1_1*1' %name)
c2 = Conv(input, 256, kernel=(1, 1), pad=(0, 0), name='%s_conv2_1*1' %name)
c3 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv3_1*1' %name)
c3_1 = Conv(c3, 256, kernel=(1, 3), pad=(0, 1), name='%s_conv4_3*1' %name)
c3_2 = Conv(c3, 256, kernel=(3, 1), pad=(1, 0), name='%s_conv5_1*3' %name)
c4 = Conv(input, 384, kernel=(1, 1), pad=(0, 0), name='%s_conv6_1*1' %name)
c4 = Conv(c4, 448, kernel=(1, 3), pad=(0, 1), name='%s_conv7_1*3' %name)
c4 = Conv(c4, 512, kernel=(3, 1), pad=(1, 0), name='%s_conv8_3*1' %name)
c4_1 = Conv(c4, 256, kernel=(3, 1), pad=(1, 0), name='%s_conv9_1*3' %name)
c4_2 = Conv(c4, 256, kernel=(1, 3), pad=(0, 1), name='%s_conv10_3*1' %name)
concat = mx.sym.Concat(*[c1, c2, c3_1, c3_2, c4_1, c4_2], name='%s_concat' %name)
return concat
def get_symbol(num_classes=1000, dtype='float32', **kwargs):
data = mx.sym.Variable(name="data")
if dtype == 'float32':
data = mx.sym.identity(data=data, name='id')
else:
if dtype == 'float16':
data = mx.sym.Cast(data=data, dtype=np.float16)
x = Inception_stem(data, name='in_stem')
#4 * InceptionA
# x = InceptionA(x, name='in1A')
# x = InceptionA(x, name='in2A')
# x = InceptionA(x, name='in3A')
# x = InceptionA(x, name='in4A')
for i in range(4):
x = InceptionA(x, name='in%dA' %(i+1))
#Reduction A
x = ReductionA(x, name='re1A')
#7 * InceptionB
# x = InceptionB(x, name='in1B')
# x = InceptionB(x, name='in2B')
# x = InceptionB(x, name='in3B')
# x = InceptionB(x, name='in4B')
# x = InceptionB(x, name='in5B')
# x = InceptionB(x, name='in6B')
# x = InceptionB(x, name='in7B')
for i in range(7):
x = InceptionB(x, name='in%dB' %(i+1))
#ReductionB
x = ReductionB(x, name='re1B')
#3 * InceptionC
# x = InceptionC(x, name='in1C')
# x = InceptionC(x, name='in2C')
# x = InceptionC(x, name='in3C')
for i in range(3):
x = InceptionC(x, name='in%dC' %(i+1))
#Average Pooling
x = mx.sym.Pooling(x, kernel=(8, 8), pad=(1, 1), pool_type='avg', name='global_avgpool')
#Dropout
x = mx.sym.Dropout(x, p=0.2)
flatten = mx.sym.Flatten(x, name='flatten')
fc1 = mx.sym.FullyConnected(flatten, num_hidden=num_classes, name='fc1')
if dtype == 'float16':
fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
softmax = mx.sym.SoftmaxOutput(fc1, name='softmax')
return softmax
| apache-2.0 | -6,277,725,470,599,457,000 | 39.493023 | 152 | 0.584539 | false | 2.479635 | false | false | false | 0.011371 |
unclev/vk.unclev.ru | extensions/sticker.py | 1 | 1937 | # coding: utf-8
# This file is a part of VK4XMPP transport
# © simpleApps, 2015.
import base64
from tempfile import mktemp
from cStringIO import StringIO
sticker_url = re.compile(r"^Sticker\:\s(http[s]?\:\/\/[a-zA-Z0-9\._\/]+)$")
try:
from PIL import Image
except ImportError:
logger.warning("sticker: not enabling RGB conversion because PIL is not installed")
ENABLE_RGB_CONVERSION = False
if not isdef("STICKER_SIZE"):
STICKER_SIZE = "128"
GLOBAL_USER_SETTINGS["send_stickers"] = {"label": "Send stickers with XHTML-IM",
"desc": "If set, transport would send images for stickers instead of URLs (requires client-side support)", "value": 0}
def convertImage(data):
outfile = mktemp()
io = StringIO(data)
image = Image.open(io)
image.convert("RGB").save(outfile, "JPEG", quality=RGB_CONVERSION_QUALITY)
data = rFile(outfile)
try:
os.remove(outfile)
except Exception:
crashLog("convertImage")
return data
def sendSticker(msg, destination, source):
body = msg.getBody()
if body:
if msg.getType() == "groupchat":
user = Chat.getUserObject(destination)
else:
user = Transport.get(destination)
if user and user.settings.send_stickers:
url = sticker_url.search(body)
if url:
url = url.group(1).replace("256b", STICKER_SIZE)
data = urllib.urlopen(url).read()
if data:
mime = "png"
if isdef("ENABLE_RGB_CONVERSION") and ENABLE_RGB_CONVERSION:
data = convertImage(data)
mime = "jpeg"
data = base64.b64encode(data)
xhtml = msg.setTag("html", namespace=xmpp.NS_XHTML_IM)
xbody = xhtml.setTag("body", namespace="http://www.w3.org/1999/xhtml")
xbody.setTag("br")
xbody.setTag("img", {"src": "data:image/%s;base64,%s" % (mime, data), "alt": "img"})
def initStickerSender():
if xmpp.NS_GROUPCHAT in TransportFeatures:
registerHandler("msg03g", sendSticker)
registerHandler("evt01", initStickerSender)
registerHandler("msg03", sendSticker) | mit | -641,610,603,809,470,000 | 27.910448 | 119 | 0.694215 | false | 2.955725 | false | false | false | 0.023244 |
mefly2012/platform | src/clean/guizhou_zhaobiao.py | 2 | 1991 | # -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
import re
class guizhou_zhaobiao():
need_check_ziduan = [u'title',
u'city',
u'pubdate',
u'data_sources',
u'company_name_invite',
u'bidwinning_pubdate'
]
def check_title(self, source, ustr):
"""title 校验"""
ret = None
if ustr and len(ustr):
if ustr and len(ustr):
if any(c in u')(' for c in ustr):
ret = u'有特殊符号'
return ret
def check_city(self, source, ustr):
"""地区 校验"""
ret = None
if ustr and len(ustr):
if ustr != u'贵州':
ret = u"city不为贵州"
return ret
def check_pubdate(self, source, ustr):
"""发布日期 校验"""
ret = None
if ustr and len(ustr):
if not public.date_format(ustr):
ret = u"不合法日期"
return ret
def check_data_sources(self, source, ustr):
"""数据来源 校验"""
ret = None
if ustr and len(ustr):
if ustr != u'贵州招中标网':
ret = u"不为贵州招中标网"
return ret
def check_company_name_invite(self, source, ustr):
"""招标单位名称 校验"""
ret = None
SPECIAL_STR = ur"[ .。##,,??/、\`~;;•·$¥@!!^…'’‘**%)(]"
if ustr and len(ustr):
if re.compile(SPECIAL_STR).search(ustr):
ret = u'包含特殊字符'
return ret
def check_bidwinning_pubdate(self, source, ustr):
"""中标公告发布时间 校验"""
ret = None
if ustr and len(ustr):
if not public.date_format(ustr):
ret = u"不合法日期"
return ret
| apache-2.0 | -5,191,029,656,646,780,000 | 25.558824 | 62 | 0.45515 | false | 3.015025 | false | false | false | 0.001661 |
ThomasSweijen/TPF | examples/clumps/triax-basic-with-clumps.py | 5 | 2182 | # encoding: utf-8
# Copyright (C) 2012 by Bruno Chareyre
# An update of the original script from Janek Kozicki
from yade import pack
from numpy import arange
import itertools
import random
import yade.plot
## corners of the initial packing
mn,mx=Vector3(0,0,0),Vector3(10,10,10)
## create material #0, which will be used as default
O.materials.append(FrictMat(young=6e6,poisson=.4,frictionAngle=radians(5),density=2600))
O.materials.append(FrictMat(young=6e6,poisson=.4,frictionAngle=0,density=2600,label='frictionless'))
d=8
# clumps
for xyz in itertools.product(arange(0,d),arange(0,d),arange(0,d)):
ids_spheres=O.bodies.appendClumped(pack.regularHexa(pack.inEllipsoid((mn[0]+xyz[0]*(mx[0]-mn[0])/d,mn[0]+xyz[1]*(mx[1]-mn[1])/d,mn[2]+xyz[2]*(mx[2]-mn[2])/d),(0.45+random.random()*0.1,0.45+random.random()*0.1,0.45+random.random()*0.1)),radius=0.15+random.random()*0.05,gap=0,color=[random.random(),random.random(),random.random()]))
## create walls around the packing
walls=aabbWalls(material='frictionless')
wallIds=O.bodies.append(walls)
from yade import qt
qt.Controller()
qt.View()
## hope that we got the ids right?!
triax=TriaxialCompressionEngine(
wall_bottom_id=wallIds[2],
wall_top_id=wallIds[3],
wall_left_id=wallIds[0],
wall_right_id=wallIds[1],
wall_back_id=wallIds[4],
wall_front_id=wallIds[5],
internalCompaction=False,
sigmaIsoCompaction=50e3,
sigmaLateralConfinement=50e3,
strainRate=0.02,
frictionAngleDegree=30,
StabilityCriterion = 0.01,
max_vel=1,
)
recorder=TriaxialStateRecorder(
iterPeriod=20,
file="./WallStresses_clumps",
truncate=1
)
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
GlobalStiffnessTimeStepper(timestepSafetyCoefficient=0.6),
triax,
recorder,
# you can add TriaxialStateRecorder and such here…
NewtonIntegrator(damping=.4)
]
#yade.plot.plots={'eps':('sigma',)}
#O.saveTmp('initial');
#def addPlotData():
# yade.plot.addData({'t':O.time,'i':O.iter,'eps':strainer.strain,'sigma':strainer.avgStress})
| gpl-2.0 | 5,834,077,862,425,454,000 | 27.311688 | 333 | 0.736239 | false | 2.460497 | false | false | false | 0.038532 |
CrushAndRun/Cloudbot-Fluke | plugins/lurve.py | 1 | 1239 | import random
from cloudbot import hook
@hook.command("lurve","luff", "luv")
def lurve(text, nick, message):
"""lurves all over <user>"""
target = text.strip()
# Use {N} to represent the person's nickname who is performing the action
# Use {T} to represent the person's nickname who is the target of the action
loving = [
"{N} wraps arms around {T} and clings forever",
"{N} cuddles {T} in the fluffiest blanket ever",
"{N} lays their head on the lap of {T} and goes to sleep, dreaming da best sweet dreams",
"{N} caresses {T}'s hair",
"{N} caresses {T}'s cheek",
"{N} plants a shy kiss on {T}'s cheek",
"{N} gives {T} a BIIIIIIIIG hug!!!",
"{N} lovingly tackles {T} into a pit of the softest pillows ever",
"{N} cheers happily for {T}!!",
"{N} pulls {T} back into bed for more cuddles ♥~",
"{N} snuggles {T} for Netflix and chili popcorn",
"{N} happily kisses {T} on the cheek",
"{N} shares a milkshake with {T}"
];
out = "{}".format(random.choice(loving))
out = out.replace("{N}", nick)
out = out.replace("{T}", target)
message(out)
| gpl-3.0 | -7,759,147,749,937,587,000 | 36.484848 | 101 | 0.561843 | false | 3.171795 | false | false | false | 0.003234 |
MisterTea/HyperNEAT | boost_1_57_0/libs/python/test/enum.py | 12 | 1840 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from enum_ext import *
>>> identity(color.red) # in case of duplicated enums it always take the last enum
enum_ext.color.blood
>>> identity(color.green)
enum_ext.color.green
>>> identity(color.blue)
enum_ext.color.blue
>>> identity(color(1)) # in case of duplicated enums it always take the last enum
enum_ext.color.blood
>>> identity(color(2))
enum_ext.color.green
>>> identity(color(3))
enum_ext.color(3)
>>> identity(color(4))
enum_ext.color.blue
--- check export to scope ---
>>> identity(red)
enum_ext.color.blood
>>> identity(green)
enum_ext.color.green
>>> identity(blue)
enum_ext.color.blue
>>> try: identity(1)
... except TypeError: pass
... else: print 'expected a TypeError'
>>> c = colorized()
>>> c.x
enum_ext.color.blood
>>> c.x = green
>>> c.x
enum_ext.color.green
>>> red == blood
True
>>> red == green
False
>>> hash(red) == hash(blood)
True
>>> hash(red) == hash(green)
False
'''
# pickling of enums only works with Python 2.3 or higher
exercise_pickling = '''
>>> import pickle
>>> p = pickle.dumps(color.green, pickle.HIGHEST_PROTOCOL)
>>> l = pickle.loads(p)
>>> identity(l)
enum_ext.color.green
'''
def run(args = None):
import sys
import doctest
import pickle
if args is not None:
sys.argv = args
self = sys.modules.get(__name__)
if (hasattr(pickle, "HIGHEST_PROTOCOL")):
self.__doc__ += exercise_pickling
return doctest.testmod(self)
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| bsd-3-clause | 3,445,748,293,045,594,600 | 19.647059 | 82 | 0.625 | false | 3.082077 | false | false | false | 0.004348 |
jianjunz/online-judge-solutions | leetcode/1223-graph-connectivity-with-threshold.py | 2 | 1095 | class Solution:
def areConnected(self, n: int, threshold: int, queries: List[List[int]]) -> List[bool]:
cities=[0]*(n+1)
group={}
nextGroupId=1
def union(source, to):
if source==to:
return
for c in group[source]:
cities[c]=to
group[to].extend(group[source])
del group[source]
for base in range(threshold+1, n):
currentGroupId=nextGroupId
nextGroupId+=1
group[currentGroupId]=[]
for member in range(base, n+1, base):
if cities[member]==0:
cities[member]=currentGroupId
group[currentGroupId].append(member)
else:
union(cities[member], currentGroupId)
answer=[False]*len(queries)
for i in range(len(queries)):
u,v=queries[i]
if cities[u]==cities[v] and cities[u]!=0:
answer[i]=True
return answer
| mit | 5,391,455,218,779,802,000 | 32.28125 | 91 | 0.46758 | false | 4.415323 | false | false | false | 0.021005 |
sciCloud/OLiMS | lims/browser/fields/datetimefield.py | 2 | 2641 | from time import strptime
from dependencies.dependency import ClassSecurityInfo
from dependencies.dependency import DateTime, safelocaltime
from dependencies.dependency import DateTimeError
from dependencies.dependency import registerField
from dependencies.dependency import IDateTimeField
from dependencies.dependency import *
from dependencies.dependency import DateTimeField as DTF
from lims import logger
from dependencies.dependency import implements
class DateTimeField(DTF):
"""A field that stores dates and times
This is identical to the AT widget on which it's based, but it checks
the i18n translation values for date formats. This does not specifically
check the date_format_short_datepicker, so this means that date_formats
should be identical between the python strftime and the jquery version.
"""
_properties = Field._properties.copy()
_properties.update({
'type': 'datetime',
'widget': CalendarWidget,
})
implements(IDateTimeField)
security = ClassSecurityInfo()
security.declarePrivate('set')
def set(self, instance, value, **kwargs):
"""
Check if value is an actual date/time value. If not, attempt
to convert it to one; otherwise, set to None. Assign all
properties passed as kwargs to object.
"""
val = value
if not value:
val = None
elif not isinstance(value, DateTime):
for fmt in ['date_format_long', 'date_format_short']:
fmtstr = instance.translate(fmt, domain='bika', mapping={})
fmtstr = fmtstr.replace(r"${", '%').replace('}', '')
try:
val = strptime(value, fmtstr)
except ValueError:
continue
try:
val = DateTime(*list(val)[:-6])
except DateTimeError:
val = None
if val.timezoneNaive():
# Use local timezone for tz naive strings
# see http://dev.plone.org/plone/ticket/10141
zone = val.localZone(safelocaltime(val.timeTime()))
parts = val.parts()[:-1] + (zone,)
val = DateTime(*parts)
break
else:
logger.warning("DateTimeField failed to format date "
"string '%s' with '%s'" % (value, fmtstr))
super(DateTimeField, self).set(instance, val, **kwargs)
registerField(DateTimeField,
title='Date Time',
description='Used for storing date/time')
| agpl-3.0 | 4,101,780,731,871,722,000 | 36.197183 | 77 | 0.605074 | false | 4.881701 | false | false | false | 0.000379 |
ArneBab/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-mac/lib-scriptpackages/StdSuites/__init__.py | 73 | 12854 | """
Package generated from /Volumes/Sap/System Folder/Extensions/AppleScript
Resource aeut resid 0 Standard Event Suites for English
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the StdSuites package is removed.", stacklevel=2)
import aetools
Error = aetools.Error
import Text_Suite
import AppleScript_Suite
import Standard_Suite
import Macintosh_Connectivity_Clas
import QuickDraw_Graphics_Suite
import QuickDraw_Graphics_Suppleme
import Required_Suite
import Table_Suite
import Type_Names_Suite
_code_to_module = {
'TEXT' : Text_Suite,
'ascr' : AppleScript_Suite,
'core' : Standard_Suite,
'macc' : Macintosh_Connectivity_Clas,
'qdrw' : QuickDraw_Graphics_Suite,
'qdsp' : QuickDraw_Graphics_Suppleme,
'reqd' : Required_Suite,
'tbls' : Table_Suite,
'tpnm' : Type_Names_Suite,
}
_code_to_fullname = {
'TEXT' : ('StdSuites.Text_Suite', 'Text_Suite'),
'ascr' : ('StdSuites.AppleScript_Suite', 'AppleScript_Suite'),
'core' : ('StdSuites.Standard_Suite', 'Standard_Suite'),
'macc' : ('StdSuites.Macintosh_Connectivity_Clas', 'Macintosh_Connectivity_Clas'),
'qdrw' : ('StdSuites.QuickDraw_Graphics_Suite', 'QuickDraw_Graphics_Suite'),
'qdsp' : ('StdSuites.QuickDraw_Graphics_Suppleme', 'QuickDraw_Graphics_Suppleme'),
'reqd' : ('StdSuites.Required_Suite', 'Required_Suite'),
'tbls' : ('StdSuites.Table_Suite', 'Table_Suite'),
'tpnm' : ('StdSuites.Type_Names_Suite', 'Type_Names_Suite'),
}
from Text_Suite import *
from AppleScript_Suite import *
from Standard_Suite import *
from Macintosh_Connectivity_Clas import *
from QuickDraw_Graphics_Suite import *
from QuickDraw_Graphics_Suppleme import *
from Required_Suite import *
from Table_Suite import *
from Type_Names_Suite import *
def getbaseclasses(v):
if not getattr(v, '_propdict', None):
v._propdict = {}
v._elemdict = {}
for superclassname in getattr(v, '_superclassnames', []):
superclass = eval(superclassname)
getbaseclasses(superclass)
v._propdict.update(getattr(superclass, '_propdict', {}))
v._elemdict.update(getattr(superclass, '_elemdict', {}))
v._propdict.update(getattr(v, '_privpropdict', {}))
v._elemdict.update(getattr(v, '_privelemdict', {}))
import StdSuites
#
# Set property and element dictionaries now that all classes have been defined
#
getbaseclasses(graphic_group)
getbaseclasses(oval)
getbaseclasses(graphic_text)
getbaseclasses(graphic_shape)
getbaseclasses(drawing_area)
getbaseclasses(graphic_line)
getbaseclasses(polygon)
getbaseclasses(pixel)
getbaseclasses(rounded_rectangle)
getbaseclasses(graphic_object)
getbaseclasses(arc)
getbaseclasses(pixel_map)
getbaseclasses(rectangle)
getbaseclasses(selection_2d_object)
getbaseclasses(application)
getbaseclasses(document)
getbaseclasses(window)
getbaseclasses(file)
getbaseclasses(alias)
getbaseclasses(insertion_point)
getbaseclasses(character)
getbaseclasses(paragraph)
getbaseclasses(word)
getbaseclasses(text_flow)
getbaseclasses(text_style_info)
getbaseclasses(line)
getbaseclasses(text)
getbaseclasses(AppleTalk_address)
getbaseclasses(address_specification)
getbaseclasses(Token_Ring_address)
getbaseclasses(FireWire_address)
getbaseclasses(bus_slot)
getbaseclasses(SCSI_address)
getbaseclasses(ADB_address)
getbaseclasses(USB_address)
getbaseclasses(device_specification)
getbaseclasses(LocalTalk_address)
getbaseclasses(IP_address)
getbaseclasses(Ethernet_address)
getbaseclasses(graphic_group)
getbaseclasses(drawing_area)
getbaseclasses(cell)
getbaseclasses(column)
getbaseclasses(table)
getbaseclasses(row)
getbaseclasses(small_integer)
getbaseclasses(system_dictionary)
getbaseclasses(color_table)
getbaseclasses(fixed_point)
getbaseclasses(plain_text)
getbaseclasses(type_element_info)
getbaseclasses(machine_location)
getbaseclasses(PostScript_picture)
getbaseclasses(type_suite_info)
getbaseclasses(menu_item)
getbaseclasses(pixel_map_record)
getbaseclasses(small_real)
getbaseclasses(null)
getbaseclasses(rotation)
getbaseclasses(fixed)
getbaseclasses(long_point)
getbaseclasses(target_id)
getbaseclasses(type_property_info)
getbaseclasses(type_parameter_info)
getbaseclasses(long_fixed_point)
getbaseclasses(bounding_rectangle)
getbaseclasses(TIFF_picture)
getbaseclasses(long_fixed)
getbaseclasses(location_reference)
getbaseclasses(version)
getbaseclasses(RGB16_color)
getbaseclasses(double_integer)
getbaseclasses(type_event_info)
getbaseclasses(point)
getbaseclasses(application_dictionary)
getbaseclasses(unsigned_integer)
getbaseclasses(menu)
getbaseclasses(fixed_rectangle)
getbaseclasses(long_fixed_rectangle)
getbaseclasses(type_class_info)
getbaseclasses(RGB96_color)
getbaseclasses(dash_style)
getbaseclasses(scrap_styles)
getbaseclasses(extended_real)
getbaseclasses(long_rectangle)
getbaseclasses(May)
getbaseclasses(string)
getbaseclasses(miles)
getbaseclasses(number_or_date)
getbaseclasses(October)
getbaseclasses(event)
getbaseclasses(Pascal_string)
getbaseclasses(zone)
getbaseclasses(picture)
getbaseclasses(list_or_string)
getbaseclasses(number)
getbaseclasses(Tuesday)
getbaseclasses(version)
getbaseclasses(December)
getbaseclasses(square_kilometres)
getbaseclasses(reference)
getbaseclasses(vector)
getbaseclasses(weekday)
getbaseclasses(Sunday)
getbaseclasses(international_text)
getbaseclasses(seconds)
getbaseclasses(RGB_color)
getbaseclasses(kilometres)
getbaseclasses(styled_Unicode_text)
getbaseclasses(missing_value)
getbaseclasses(metres)
getbaseclasses(number_or_string)
getbaseclasses(list)
getbaseclasses(linked_list)
getbaseclasses(real)
getbaseclasses(encoded_string)
getbaseclasses(list_or_record)
getbaseclasses(Monday)
getbaseclasses(September)
getbaseclasses(anything)
getbaseclasses(property)
getbaseclasses(reference_form)
getbaseclasses(item)
getbaseclasses(grams)
getbaseclasses(record)
getbaseclasses(empty_ae_name_)
getbaseclasses(constant)
getbaseclasses(square_miles)
getbaseclasses(data)
getbaseclasses(Unicode_text)
getbaseclasses(yards)
getbaseclasses(cubic_yards)
getbaseclasses(pounds)
getbaseclasses(cubic_centimetres)
getbaseclasses(text)
getbaseclasses(July)
getbaseclasses(cubic_metres)
getbaseclasses(styled_text)
getbaseclasses(number_2c__date_or_text)
getbaseclasses(feet)
getbaseclasses(February)
getbaseclasses(degrees_Celsius)
getbaseclasses(keystroke)
getbaseclasses(integer)
getbaseclasses(degrees_Fahrenheit)
getbaseclasses(list_2c__record_or_text)
getbaseclasses(date)
getbaseclasses(degrees_Kelvin)
getbaseclasses(centimetres)
getbaseclasses(writing_code)
getbaseclasses(alias_or_string)
getbaseclasses(writing_code_info)
getbaseclasses(text_item)
getbaseclasses(machine)
getbaseclasses(type_class)
getbaseclasses(preposition)
getbaseclasses(Wednesday)
getbaseclasses(upper_case)
getbaseclasses(March)
getbaseclasses(square_feet)
getbaseclasses(November)
getbaseclasses(quarts)
getbaseclasses(alias)
getbaseclasses(January)
getbaseclasses(month)
getbaseclasses(June)
getbaseclasses(August)
getbaseclasses(styled_Clipboard_text)
getbaseclasses(gallons)
getbaseclasses(cubic_inches)
getbaseclasses(Friday)
getbaseclasses(sound)
getbaseclasses(class_)
getbaseclasses(kilograms)
getbaseclasses(script)
getbaseclasses(litres)
getbaseclasses(boolean)
getbaseclasses(square_metres)
getbaseclasses(inches)
getbaseclasses(character)
getbaseclasses(April)
getbaseclasses(ounces)
getbaseclasses(app)
getbaseclasses(handler)
getbaseclasses(C_string)
getbaseclasses(Thursday)
getbaseclasses(square_yards)
getbaseclasses(cubic_feet)
getbaseclasses(Saturday)
getbaseclasses(file_specification)
#
# Indices of types declared in this module
#
_classdeclarations = {
'cpic' : graphic_group,
'covl' : oval,
'cgtx' : graphic_text,
'cgsh' : graphic_shape,
'cdrw' : drawing_area,
'glin' : graphic_line,
'cpgn' : polygon,
'cpxl' : pixel,
'crrc' : rounded_rectangle,
'cgob' : graphic_object,
'carc' : arc,
'cpix' : pixel_map,
'crec' : rectangle,
'csel' : selection_2d_object,
'capp' : application,
'docu' : document,
'cwin' : window,
'file' : file,
'alis' : alias,
'cins' : insertion_point,
'cha ' : character,
'cpar' : paragraph,
'cwor' : word,
'cflo' : text_flow,
'tsty' : text_style_info,
'clin' : line,
'ctxt' : text,
'cat ' : AppleTalk_address,
'cadr' : address_specification,
'ctok' : Token_Ring_address,
'cfw ' : FireWire_address,
'cbus' : bus_slot,
'cscs' : SCSI_address,
'cadb' : ADB_address,
'cusb' : USB_address,
'cdev' : device_specification,
'clt ' : LocalTalk_address,
'cip ' : IP_address,
'cen ' : Ethernet_address,
'cpic' : graphic_group,
'cdrw' : drawing_area,
'ccel' : cell,
'ccol' : column,
'ctbl' : table,
'crow' : row,
'shor' : small_integer,
'aeut' : system_dictionary,
'clrt' : color_table,
'fpnt' : fixed_point,
'TEXT' : plain_text,
'elin' : type_element_info,
'mLoc' : machine_location,
'EPS ' : PostScript_picture,
'suin' : type_suite_info,
'cmen' : menu_item,
'tpmm' : pixel_map_record,
'sing' : small_real,
'null' : null,
'trot' : rotation,
'fixd' : fixed,
'lpnt' : long_point,
'targ' : target_id,
'pinf' : type_property_info,
'pmin' : type_parameter_info,
'lfpt' : long_fixed_point,
'qdrt' : bounding_rectangle,
'TIFF' : TIFF_picture,
'lfxd' : long_fixed,
'insl' : location_reference,
'vers' : version,
'tr16' : RGB16_color,
'comp' : double_integer,
'evin' : type_event_info,
'QDpt' : point,
'aete' : application_dictionary,
'magn' : unsigned_integer,
'cmnu' : menu,
'frct' : fixed_rectangle,
'lfrc' : long_fixed_rectangle,
'gcli' : type_class_info,
'tr96' : RGB96_color,
'tdas' : dash_style,
'styl' : scrap_styles,
'exte' : extended_real,
'lrct' : long_rectangle,
'may ' : May,
'TEXT' : string,
'mile' : miles,
'nd ' : number_or_date,
'oct ' : October,
'evnt' : event,
'pstr' : Pascal_string,
'zone' : zone,
'PICT' : picture,
'ls ' : list_or_string,
'nmbr' : number,
'tue ' : Tuesday,
'vers' : version,
'dec ' : December,
'sqkm' : square_kilometres,
'obj ' : reference,
'vect' : vector,
'wkdy' : weekday,
'sun ' : Sunday,
'itxt' : international_text,
'scnd' : seconds,
'cRGB' : RGB_color,
'kmtr' : kilometres,
'sutx' : styled_Unicode_text,
'msng' : missing_value,
'metr' : metres,
'ns ' : number_or_string,
'list' : list,
'llst' : linked_list,
'doub' : real,
'encs' : encoded_string,
'lr ' : list_or_record,
'mon ' : Monday,
'sep ' : September,
'****' : anything,
'prop' : property,
'kfrm' : reference_form,
'cobj' : item,
'gram' : grams,
'reco' : record,
'undf' : empty_ae_name_,
'enum' : constant,
'sqmi' : square_miles,
'rdat' : data,
'utxt' : Unicode_text,
'yard' : yards,
'cyrd' : cubic_yards,
'lbs ' : pounds,
'ccmt' : cubic_centimetres,
'ctxt' : text,
'jul ' : July,
'cmet' : cubic_metres,
'STXT' : styled_text,
'nds ' : number_2c__date_or_text,
'feet' : feet,
'feb ' : February,
'degc' : degrees_Celsius,
'kprs' : keystroke,
'long' : integer,
'degf' : degrees_Fahrenheit,
'lrs ' : list_2c__record_or_text,
'ldt ' : date,
'degk' : degrees_Kelvin,
'cmtr' : centimetres,
'psct' : writing_code,
'sf ' : alias_or_string,
'citl' : writing_code_info,
'citm' : text_item,
'mach' : machine,
'type' : type_class,
'prep' : preposition,
'wed ' : Wednesday,
'case' : upper_case,
'mar ' : March,
'sqft' : square_feet,
'nov ' : November,
'qrts' : quarts,
'alis' : alias,
'jan ' : January,
'mnth' : month,
'jun ' : June,
'aug ' : August,
'styl' : styled_Clipboard_text,
'galn' : gallons,
'cuin' : cubic_inches,
'fri ' : Friday,
'snd ' : sound,
'pcls' : class_,
'kgrm' : kilograms,
'scpt' : script,
'litr' : litres,
'bool' : boolean,
'sqrm' : square_metres,
'inch' : inches,
'cha ' : character,
'apr ' : April,
'ozs ' : ounces,
'capp' : app,
'hand' : handler,
'cstr' : C_string,
'thu ' : Thursday,
'sqyd' : square_yards,
'cfet' : cubic_feet,
'sat ' : Saturday,
'fss ' : file_specification,
}
class StdSuites(Text_Suite_Events,
AppleScript_Suite_Events,
Standard_Suite_Events,
Macintosh_Connectivity_Clas_Events,
QuickDraw_Graphics_Suite_Events,
QuickDraw_Graphics_Suppleme_Events,
Required_Suite_Events,
Table_Suite_Events,
Type_Names_Suite_Events,
aetools.TalkTo):
_signature = 'ascr'
_moduleName = 'StdSuites'
| mit | -1,221,008,212,516,907,500 | 26.061053 | 86 | 0.699238 | false | 2.946138 | false | false | false | 0.018905 |
SDSG-Invenio/invenio | invenio/modules/records/bundles.py | 8 | 1075 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Records bundles."""
from invenio.ext.assets import Bundle, RequireJSFilter
js = Bundle(
"js/records/init.js",
filters=RequireJSFilter(),
output="records.js",
weight=20
)
css = Bundle(
"css/records/record.css",
output="record.css",
weight=20,
filters="cleancss"
)
| gpl-2.0 | 5,308,217,067,452,991,000 | 28.054054 | 74 | 0.71814 | false | 3.798587 | false | false | false | 0 |
JoseALermaIII/clashcallerbot-reddit | clashcallerbotreddit/search.py | 1 | 14692 | #! python3
# -*- coding: utf-8 -*-
"""Searches recent reddit comments for ClashCaller! string and saves to database.
This module uses the Python Reddit API Wrapper (PRAW) to search recent reddit comments
for the ClashCaller! string.
If found, the userID, permalink, comment time, message, and
expiration time (if any) are parsed. The default, or provided, expiration time is
applied, then all the comment data is saved to a MySQL-compatible database.
The comment is replied to, then the userID is PM'ed confirmation."""
import praw
import praw.exceptions
import prawcore.exceptions
import logging.config
import re
import datetime
import time
import urllib3.exceptions
from socket import gaierror
from clashcallerbotreddit.database import ClashCallerDatabase
from clashcallerbotreddit import LOGGING, config
# Logger
logging.config.dictConfig(LOGGING)
# FIXME: logging.raiseExceptions = False crashes during exception. Maybe remove console handler?
logging.raiseExceptions = True # Production mode if False (no console sys.stderr output)
logger = logging.getLogger('search')
# Generate reddit instance
reddit = praw.Reddit('clashcallersearch') # Section name in praw.ini
#subreddit = reddit.subreddit('ClashCallerBot') # Limit scope for testing purposes
subreddit = reddit.subreddit('all') # Production mode
reddituser = reddit.user.me()
# Regular expressions
clashcaller_re = re.compile(r'''
[!|\s]? # prefix ! or space (optional)
[C|c]lash[C|c]aller # lower or camelcase ClashCaller (required)
[!|\s] # suffix ! or space (required)
''', re.VERBOSE)
expiration_re = re.compile(r'''
(?P<exp_digit>(\d){1,2}) # single or double digit (required)
(\s)? # space (optional)
(?P<exp_unit>minute(s)?\s| # minute(s) (space after required)
min(s)?\s| # minute abbr. (space after required)
hour(s)?\s| # hour(s) (space after required)
hr(s)?\s # hour abbr. (space after required)
)+''', re.VERBOSE | re.IGNORECASE) # case-insensitive
message_re = re.compile(r'''
(\s)* # space (optional)
" # opening double quote (required)
base(s)? # string: base(s) (required)
[\W|\s]* # non-word character or space (optional)
(\d){1,2} # single or double digit (required)
.* # any character after (optional)
" # closing double quote (required)
''', re.VERBOSE | re.IGNORECASE) # case-insensitive
# Make database instance
db = ClashCallerDatabase(config, root_user=False)
start_time = datetime.datetime.now(datetime.timezone.utc)
def main():
logger.info('Start search.py...')
while True:
try:
# Search recent comments for ClashCaller! string
for comment in subreddit.stream.comments():
match = clashcaller_re.search(comment.body)
if not match:
# Skip comments that don't have the clashcaller string
continue
if not is_recent(comment.created_utc, start_time):
# Skip comments that are before start_time
continue
if comment.author.name == reddituser.name:
# Skip bot's comments
continue
if have_replied(comment, reddituser.name):
# Skip comments already replied to
logger.debug(f'Skipping {comment}: already replied.')
continue
logger.info(f'In: {comment}')
# Strip everything before and including ClashCaller! string
comment.body = comment.body[match.end():].strip()
logger.debug(f'Stripped comment body: {comment.body}')
# Check for expiration time
minute_tokens = ('min', 'mins', 'minute', 'minutes')
match = expiration_re.search(comment.body)
if not match:
timedelta = datetime.timedelta(hours=1) # Default to 1 hour
else:
exp_digit = int(match.group('exp_digit').strip())
if exp_digit == 0: # ignore zeros
# Send message and ignore comment
error = 'Expiration time is zero.'
# send_error_message(comment.author.name, comment.permalink, error)
logging.error(error)
continue
exp_unit = match.group('exp_unit').strip().lower()
if exp_unit in minute_tokens:
timedelta = datetime.timedelta(minutes=exp_digit)
else:
if exp_digit >= 24: # ignore days
# Send message and ignore comment
error = 'Expiration time is >= 1 day.'
# send_error_message(comment.author.name, comment.permalink, error)
logging.error(error)
continue
timedelta = datetime.timedelta(hours=exp_digit)
logger.debug(f'timedelta = {timedelta.seconds} seconds')
# Apply expiration time to comment date
comment_datetime = datetime.datetime.fromtimestamp(comment.created_utc, datetime.timezone.utc)
expiration_datetime = comment_datetime + timedelta
logger.info(f'comment_datetime = {comment_datetime}')
logger.info(f'expiration_datetime = {expiration_datetime}')
# Ignore if expire time passed
if expiration_datetime < datetime.datetime.now(datetime.timezone.utc):
# Send message and ignore comment
error = 'Expiration time has already passed.'
# send_error_message(comment.author.name, comment.permalink, error)
logging.error(error)
continue
# Strip expiration time
comment.body = comment.body[match.end():].strip()
# Evaluate message
if len(comment.body) > 100:
# Send message and ignore comment
error = 'Message length > 100 characters.'
# send_error_message(comment.author.name, comment.permalink, error)
logger.error(error)
continue
match = message_re.search(comment.body)
if not match:
# Send message and ignore comment
error = 'Message not properly formatted.'
# send_error_message(comment.author.name, comment.permalink, error)
logger.error(error)
continue
message = comment.body
logger.debug(f'message = {message}')
# Save message data to MySQL-compatible database
db.open_connections()
db.save_message(comment.permalink, message, expiration_datetime, comment.author.name)
db.close_connections()
# Reply and send PM
send_confirmation(comment.author.name, comment.permalink, expiration_datetime)
send_confirmation_reply(comment, expiration_datetime, message)
except urllib3.exceptions.ConnectionError as err:
logger.exception(f'urllib3: {err}')
time.sleep(20)
pass
except gaierror as err:
logger.exception(f'socket: {err}')
time.sleep(20)
pass
except prawcore.exceptions.PrawcoreException as err:
logger.exception(f'prawcore: {err}')
time.sleep(60)
pass
except praw.exceptions.PRAWException as err:
logger.exception(f'praw: {err}')
time.sleep(10)
pass
except AttributeError as err:
logger.exception(f'AttributeError: {err}')
time.sleep(10)
pass
def send_message(usr_name: str, subject_arg: str, message_arg: str) -> None:
"""Send message to reddit user.
Sends a message to a reddit user with given subject line.
Args:
usr_name: username of user.
subject_arg: Subject line of message.
message_arg: Message to send.
Returns:
None.
"""
try:
reddit.redditor(usr_name).message(subject_arg, message_arg)
except praw.exceptions.PRAWException as err:
logger.exception(f'send_message: {err}')
def send_confirmation(usr_name: str, link: str, exp: datetime.datetime) -> None:
"""Send confirmation to reddit user.
Function sends given user confirmation of given expiration time with given link.
Args:
usr_name: username of user.
link: Permalink of comment.
exp: Expiration datetime of call.
Returns:
None.
"""
subject = f'{reddituser.name} Confirmation Sent'
permalink = 'https://np.reddit.com' + link # Permalinks are missing prefix
exp = datetime.datetime.strftime(exp, '%b. %d, %Y at %I:%M:%S %p %Z')
message = f"""{reddituser.name} here!
I will be messaging you on [**{exp}**](http://www.wolframalpha.com/input/?i={exp} To Local Time) to remind
you of [**this call.**]({permalink})
Thank you for entrusting us with your warring needs,
- {reddituser.name}
[^(More info)](https://www.reddit.com/r/{reddituser.name}/comments/4e9vo7/clashcallerbot_info/)
"""
try:
send_message(usr_name, subject, message)
except praw.exceptions.PRAWException as err:
logger.exception(f'send_confirmation: {err}')
def send_error_message(usr_name: str, link: str, error: str) -> None:
"""Send error message to reddit user.
Function sends given error to given user.
Args:
usr_name: username of user.
link: Permalink of comment.
error: Error to send to user.
Returns:
None.
"""
subject = 'Unable to save call due to error'
permalink = 'https://np.reddit.com' + link # Permalinks are missing prefix
message = f"""{reddituser.name} here!
I regret to inform you that I could not save [**your call**]({permalink}) because of:
{error}
Please delete your call to reduce spam and try again after making the
above change.
Thank you for entrusting us with your warring needs,
- {reddituser.name}
[^(More info)](https://www.reddit.com/r/{reddituser.name}/comments/4e9vo7/clashcallerbot_info/)
"""
try:
send_message(usr_name, subject, message)
except praw.exceptions.PRAWException as err:
logger.exception(f'send_error_message: {err}')
def send_confirmation_reply(cmnt_obj: reddit.comment, exp: datetime.datetime, message_arg: str):
"""Replies to a comment.
Function replies to a given comment object with a given message.
Args:
cmnt_obj: Comment object to reply to.
exp: Expiration datetime of call.
message_arg: Original call message.
Returns:
id of new comment if successful, None otherwise
"""
link = cmnt_obj.permalink
permalink = 'https://np.reddit.com' + link # Permalinks are missing prefix
pretty_exp = datetime.datetime.strftime(exp, '%b. %d, %Y at %I:%M:%S %p %Z') # Human readable datetime
message = f"""
I will be messaging you on [**{pretty_exp}**](http://www.wolframalpha.com/input/?i={pretty_exp} To Local Time)
to remind you of [**this call.**]({permalink})
Others can tap
[**REMIND ME, TOO**](https://www.reddit.com/message/compose/?to={reddituser.name}&subject=AddMe!&message=[{link}]{exp}{message_arg})
to send me a PM to be added to the call reminder and reduce spam.
You can also tap
[**REMOVE ME**](https://www.reddit.com/message/compose/?to={reddituser.name}&subject=DeleteMe!&message=[{link}]) to
remove yourself from the call reminder or
[**MY CALLS**](https://www.reddit.com/message/compose/?to={reddituser.name}&subject=MyCalls!&message=El Zilcho)
to list your current calls.
Thank you for entrusting us with your warring needs!
[^(More info)](https://www.reddit.com/r/{reddituser.name}/comments/4e9vo7/clashcallerbot_info/)
"""
comment_id = None
try:
comment_id = cmnt_obj.reply(message)
except praw.exceptions.PRAWException as err:
logger.exception(f'send_confirmation_reply: {err}')
return comment_id
def have_replied(cmnt_obj: reddit.comment, usr_name: str) -> bool:
"""Check if user has replied to a comment.
Function checks reply authors of given comment for given user.
Args:
cmnt_obj: Comment object to get replies of.
usr_name: Name of bot to check for.
Returns:
True if successful, False otherwise.
"""
try:
cmnt_obj.refresh() # Refreshes attributes of comment to load replies
# Keep fetching 20 new replies until it finishes
while True:
try:
replies = cmnt_obj.replies.replace_more()
break
except praw.exceptions.PRAWException as err:
logger.exception(f'comment.replies.replace_more: {err}')
time.sleep(1)
if not replies:
return False
for reply in replies:
if reply.author.name == usr_name:
return True
except praw.exceptions.PRAWException as err:
logger.exception(f'have_replied: {err}')
return False
def is_recent(cmnt_time: float, time_arg: datetime.datetime) -> bool:
"""Checks if comment is a recent comment.
Function compares given comment Unix timestamp with given time.
Args:
cmnt_time: Comment's created Unix timestamp in UTC.
time_arg: Time to compare with comment timestamp.
Returns:
True if comment's created time is after given time, False otherwise.
"""
cmnt_datetime = datetime.datetime.fromtimestamp(cmnt_time, datetime.timezone.utc)
if cmnt_datetime > time_arg:
return True
return False
# If run directly, instead of imported as a module, run main():
if __name__ == '__main__':
main()
| mit | 484,646,875,316,458,100 | 37.970822 | 133 | 0.588552 | false | 4.160861 | false | false | false | 0.003403 |
scvalencia/ROBOCOL_desastres | Raspberry_pi/Python/src/camera_pi.py | 15 | 1662 | import time
import io
import threading
import picamera
class Camera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
def initialize(self):
if Camera.thread is None:
# start background frame thread
Camera.thread = threading.Thread(target=self._thread)
Camera.thread.start()
# wait until frames start to be available
while self.frame is None:
time.sleep(0)
def get_frame(self):
Camera.last_access = time.time()
self.initialize()
return self.frame
@classmethod
def _thread(cls):
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (320, 240)
camera.hflip = True
camera.vflip = True
# let camera warm up
camera.start_preview()
time.sleep(2)
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if there hasn't been any clients asking for frames in
# the last 10 seconds stop the thread
if time.time() - cls.last_access > 10:
break
cls.thread = None
| mit | -4,075,274,190,415,845,400 | 29.777778 | 71 | 0.538508 | false | 4.775862 | false | false | false | 0 |
Farmijo/TFG-Dashboard | 1eros_ejemplos/Obtencion_datos_tests.py | 1 | 1621 | import json
f_out=open("processed_data/testsData/processed_test_marks_1attempt.json", "w")
f_out2=open("processed_data/testsData/processed_averages_1st_attempt.json", "w")
output = []
output2=[]
output3=[]
notas_medias= dict()
for i in range(187):
f = open("splitted/outputbatch_{}.json".format(i), "r")
myData = json.load(f)
#Parsing para (no) existencia de atributo
for line in myData:
if line["event_type"]== "problem_check" and line["event_source"]=="server":
#print json.dumps(line, sort_keys=True,indent=4, separators=(',', ': '))
if line["event"]["attempts"]==1:
instancia_curso=dict()
instancia_curso["alumno"]=line["username"]
instancia_curso["modulo"]=line["context"]["module"]["display_name"]
instancia_curso["nota"]=line["event"]["grade"]
instancia_curso["nota_ponderada"]=((line["event"]["grade"]) *10) / line["event"]["max_grade"]
lista=line["time"]
lista=lista.split('T')[0]
instancia_curso["fecha"]=lista
#print line
output.append(instancia_curso)
json.dump(output,f_out)
for line in output:
curso=line["modulo"]
if curso not in notas_medias.keys():
notas_medias[curso]=[]
notas_medias[curso].append(line["nota_ponderada"])
else:
notas_medias[curso].append(line["nota_ponderada"])
for ref in notas_medias.keys():
nota_media=dict()
nota_media[ref]=sum(notas_medias[ref])/float(len(notas_medias[ref]))
output2.append(nota_media)
i=0
for line in output2:
final =dict()
ref=output2[i].keys()[0]
final["curso"]=ref
final["nota_media"]=output2[i][ref]
output3.append(final)
i=i+1
json.dump(output3,f_out2)
| mit | -6,291,140,095,534,053,000 | 23.938462 | 101 | 0.668106 | false | 2.577107 | false | false | false | 0.048735 |
Distrotech/icedtea7-2.5 | contrib/templater/generate.py | 6 | 5675 | import os
import re
ENDIAN, BITS, RWINDOW = 0, 1, 2
cpus = {"ia64": ("little", 64, True),
"ppc": ("big", 32, False),
"ppc64": ("big", 64, False),
"s390": ("little", 32, False),
"s390x": ("little", 64, False)}
modes = {"ia64": ("ia64",),
"ppc": ("ppc", "ppc64"),
"s390": ("s390", "s390x")}
def isLittleEndian(cpu):
if cpus[cpu][ENDIAN] == "little":
return True
return ""
def is64bit(cpu):
# Only use this one for files with CPUS in their paths
if cpus[cpu][BITS] == 64:
return True
return ""
def isRegisterWindow(cpu):
if cpus[cpu][RWINDOW]:
return True
return ""
def preprocess(src, cpu):
other_cpus = [key.upper() for key in modes.keys() if key != cpu]
cpu = cpu.upper()
things = re.compile(r"(.*?)(/\*|//|^[ \t]*#)(.*)", re.S | re.M)
ends = {"/*": "*/"}
COPY, COND_TRUE, COND_FALSE = 1, 2, 3
mode = [COPY]
dst = ""
while src:
thing = things.match(src)
if not thing:
if COND_FALSE not in mode:
dst += src
break
before, thing, src = thing.groups()
src = thing + src
if COND_FALSE not in mode:
dst += before
end = ends.get(thing, "\n")
index = src.find(end)
assert index >= 0
index += len(end)
thing = src[:index]
src = src[index:]
if not thing.startswith("#"):
if COND_FALSE not in mode:
dst += thing
continue
args = thing.strip().split()
cmd = args.pop(0)
if cmd == "#":
cmd += args.pop(0)
if cmd in ("#include", "#define", "#undef"):
if COND_FALSE not in mode:
dst += thing
elif cmd in ("#ifdef", "#ifndef"):
us, them = {
"#ifdef": (COND_TRUE, COND_FALSE),
"#ifndef": (COND_FALSE, COND_TRUE)}[cmd]
[what] = args
if what == cpu:
mode.append(us)
elif what in other_cpus:
mode.append(them)
else:
mode.append(COPY)
if COND_FALSE not in mode:
dst += thing
elif cmd == "#if":
for check in [cpu] + other_cpus:
assert "defined(%s)" % check not in args
mode.append(COPY)
dst += thing
elif cmd == "#else":
if mode[-1] == COND_TRUE:
mode[-1] = COND_FALSE
elif mode[-1] == COND_FALSE:
mode[-1] = COND_TRUE
else:
if COND_FALSE not in mode:
dst += thing
elif cmd == "#endif":
if mode[-1] == COPY and COND_FALSE not in mode:
dst += thing
mode.pop()
else:
assert False
assert mode == [COPY]
if cpu == "PPC":
dst = dst.replace("_LP64", "PPC64")
return dst
def untabify(line):
bits = line.split("\t")
line = bits.pop(0)
for bit in bits:
line += " " * (8 - len(line) % 8)
line += bit
return line
def template(src, dst, basecpu, cpu):
bits = open(src, "r").read().split("@@")
assert len(bits) & 1
for i in xrange(1, len(bits), 2):
result = eval(bits[i].strip())
if not result:
result = ""
bits[i] = result
result = "".join(bits)
if src.split(".")[-1] in ("c", "h", "cpp", "hpp"):
result = preprocess(result, cpu)
if src.split(".")[-1] != "c":
result = "\n".join(
[untabify(line.rstrip()) for line in result.split("\n")])
if os.path.exists(dst):
existing = open(dst, "r").read()
if result == existing:
return
trim = os.getcwd() + os.sep
assert dst.startswith(trim)
print "creating", dst[len(trim):]
dir = os.path.dirname(dst)
if not os.path.isdir(dir):
os.makedirs(dir)
open(dst, "w").write(result)
def skip(item):
prefixes, suffixes = ["."], ["~", ".orig", ".rej"]
for prefix in prefixes:
if item.startswith(prefix):
return True
for suffix in suffixes:
if item.endswith(suffix):
return True
return False
def visit((srcdir, dstdir, cpus), dir, items):
if ".hg" in items:
items.remove(".hg")
for item in items:
if skip(item):
continue
path = os.path.join(dir, item)
if not os.path.isfile(path):
continue
if path.find("CPU") == -1:
continue
multi = path.find("CPUS") >= 0
trim = srcdir + os.sep
assert path.startswith(trim)
trim = len(trim)
for basecpu in cpus:
for cpu in modes[basecpu]:
template(
path, os.path.join(dstdir,path[trim:] \
.replace("CPUS", cpu) \
.replace("CPU", basecpu)),
basecpu, cpu)
if not multi:
break
if __name__ == "__main__":
import sys
srcdir = os.path.dirname(os.path.abspath(sys.argv[0]))
dstdir = os.path.join(os.getcwd(), "ports")
if not os.path.isdir(dstdir):
print >>sys.stderr, "run me within the IcedTea toplevel directory"
sys.exit(1)
if len(sys.argv) < 2:
cpus = modes.keys()
cpus.sort()
cpus = "|".join(cpus)
print >>sys.stderr, "usage: %s %s..." % (sys.argv[0], cpus)
sys.exit(1)
os.path.walk(srcdir, visit, (srcdir, dstdir, sys.argv[1:]))
| gpl-2.0 | 2,081,456,582,021,593,000 | 29.18617 | 74 | 0.471013 | false | 3.633163 | false | false | false | 0.002467 |
mkocka/galaxytea | MultiBlackBody/kamensky/mbb.py | 1 | 1942 | from pylab import *
from scipy import constants as const
import numpy as np
con_h = const.h/const.e
con_k = const.k/const.e
np.seterr(all = "ignore")
parsec = 3.08567758e16
m1 = 5.0 # central mass
alfa = 0.5 # accretion parameter
mass_flow = 5.0 # mass flow from disk
Rc = 1.0*10.0**-4 # diameter of central object /10**10cm
Ri = 1.001*10.0**-4 # inner radius
Rf = 10.0**-3 # outer radius
Rstep = 10.0**-6 # step for computing
Tc = []
R_x = []
for R in np.arange(Ri,Rf,Rstep): # creating the temperature profile
R_x.append(R)
f = (1.0-(Rc/R)**(0.5))**(0.25)
Tc.append(1.4*10.0**4.0*alfa**(-1.0/5)*mass_flow**(3.0/10)*m1**(1.0/4)*R**(-3.0/4)*f**(6.0/5))
figure(figsize=(8,5))
title('T$_{c}$')
xlabel('R$_{10}$ (R/($10^{10}$ cm))')
ylabel('T$_{c}$ (K)')
plot(R_x,Tc)
savefig("Tc.png")
def planck_law(E,T_in,T_out,step = 1): #returns rectangular integral of the planck_law created for constant energy and changing temperature
c1 = 2.0*const.pi/(con_h**3.0*const.c**2.0)*const.e
total = 0.0
planck = []
x = []
i = float(T_out)
while i<=float(T_in):
B = (i/T_in)**(-11.0/3.0)/T_in*c1*(E)**3.0*1.0/(np.exp(E/(con_k*i))-1.0)
total += B*step
i+=step
return total
def mbb(r_in,E_in,E_out,E_step,T_in,T_out,T_step,D,theta): # construct the mbb spectrum, takes r_in and T_in from the temperature profile
c1 = 8.0*const.pi*r_in**2.0*np.cos(np.radians(theta))/(3.0*D**2.0)
f_E = []
f = []
i = float(E_in)
while i<=float(E_out):
f_temp = c1*planck_law(i,T_in,T_out,T_step)
print i
f_E.append(i/1000.0)
f.append(f_temp)
i += E_step
return f_E, f
T_in_arg = np.argmax(Tc)
T_in = Tc[T_in_arg]
r_in = R_x[T_in_arg]*10.0**10.0/100.0
f_E,f = mbb(r_in,0.1,20001.0,10.0,T_in,1.0,1000.0,8.0*parsec,10.0)
figure(figsize=(8,5))
title('Planck')
xlabel('E (keV)')
ylabel('B$_{E}$(T) (W.m$^{-2}$.eV$^{-1}$)')
grid()
xlim([-1,20])
xticks(range(0,20,1))
plot(f_E,f)
savefig("planck.png")
| mit | 1,385,751,900,858,128,100 | 21.068182 | 142 | 0.594748 | false | 2.014523 | false | false | false | 0.046344 |
emd/mitpci | mitpci/signal.py | 1 | 12300 | '''This module implements a class for retrieving signals digitized by
the mitpci system.
'''
import numpy as np
import os
import MDSplus as mds
class Signal(object):
'''An object corresponding to the signal retrieved from the mitpci system.
Attributes:
-----------
shot - int
The shot number of the retrieved signal.
channel - int
The channel number of the retrieved signal.
x - array-like, (`N`,)
The retrieved signal. Note that this is the "raw" signal and
has the units of bits.
[x] = bits
Fs - float
The sampling rate at which the signal was retrieved. Note that this
may be smaller than the signal's initial digitization rate; that is,
the retrieved signal may be downsampled relative to the full signal.
[Fs] = samples / second
t0 - float
The time corresponding to the first retrieved point in the signal;
that is, if x(t) corresponds to the continuous signal being sampled,
then `Signal.x[0]` = x(t0)
[t0] = s
volts_per_bit - float
The conversion factor from bits to volts for the digitized signal.
[volts_per_bit] = V / bit
Methods:
--------
t - returns retrieved signal time-base, array-like, (`N`,)
The time-base is generated on the fly as needed and is not stored
as an object property; this helps save memory and processing time,
as we do not typically look at the raw signal vs. time.
[t] = s
'''
def __init__(self, shot, channel, channels_per_board=8,
Fs=None, tlim=None):
'''Create an instance of the `Signal` class.
Input parameters:
-----------------
shot - int
The shot number of the signal to be retrieved.
channel - int
The channel number of the signal to be retrieved.
Currently, there are two digitizer boards, each of which
digitizes `channels_per_board` channels. A ValueError is
raised if `channel <= 0` or `channel > 2 * channels_per_board`.
channels_per_board - int
The number of channels digitized per digitizer board.
Currently, there are two digitizer boards, each of which
digitizes 8 channels. The value of `channels_per_board`
determines whether the value of `channel` is valid or not.
Fs - float
The desired sampling rate at which to retrieve the signal.
This may differ from the original digitization rate, `Fs_dig`;
that is, the retrieved signal can be downsampled. While
the specified `Fs` can be any positive value, the internal
methods of this class will process the user's request and
produce a *realized* sampling rate `Signal.Fs` that is
subject to the following two constraints:
(1) `Signal.Fs <= Fs_dig`, and
(2) `Signal. Fs / Fs_dig = m`, where `m` is an integer
`Fs <= 0` raises a ValueError.
Note: the "raw" signal is returned from the MDSplus server.
Josh Stillerman and Tom Fredian (of MIT, and the developers
of MDSplus) informed me that there is no way to read in only
a portion of the raw data. Thus, the entire raw signal must be
read into memory, and the signal will subsequently be sliced
in time if needed. As a result, specifying a small `Fs` will
not save computational time but will end up saving memory.
[Fs] = samples / second
tlim - array-like, (2,)
The lower and upper limits in time for which the signal
will be retrieved.
The specified `tlim` values will always bracket the retrieved
signal. That is, if `tlim[0]` does not correspond to an exact
digitization time, then the initial time returned (`Signal.t0`)
will be the closest digitization time *greater* than `tlim[0]`.
Similarly, if `tlim[1]` does not correspond to an exact
digitization time, then the final time (`Signal.t[-1]`) will be
the closest digitization time *less* than `tlim[1]`. Further,
if `tlim[0]` is less than the initial digitization time,
the retrieved signal will begin with the initial digitized point.
Similarly, if `tlim[1]` exceeds the final digitization time,
the retrieved signal will end with the final digitized point.
If `tlim` is not `None` and it is *not* a length-two array,
a ValueError is raised.
Note: the "raw" signal is returned from the MDSplus server.
Josh Stillerman and Tom Fredian (of MIT, and the developers
of MDSplus) informed me that there is no way to read in only
a portion of the raw data. Thus, the entire raw signal must be
read into memory, and the signal will subsequently be sliced
in time if needed. As a result, specifying a small `tlim` will
not save computational time but will end up saving memory.
[tlim] = s
Note:
-----
If the `$pci_path` environmental variable is not defined,
the first instance of this class (per Python session)
will automatically create an appropriate definition.
'''
self.shot = shot
self.channel = channel
# Check that `self.channel` is valid
self._checkChannel(channels_per_board=channels_per_board)
# Obtain digitizer board and signal node name for `self.channel`
self._digitizer_board = self._getDigitizerBoard(
channels_per_board=channels_per_board)
self._node_name = self._getNodeName(
channels_per_board=channels_per_board)
# If not already defined, specify the location
# of the host data server(s)
try:
key = 'pci_path'
os.environ[key]
except KeyError:
print '\n`$%s` environmental variable not defined' % key
pci_path = 'hermit.gat.com::/trees/pci'
print 'Defining `$%s` as "%s"' % (key, pci_path)
os.environ[key] = pci_path
# Open the tree and retrieve the signal within the specified
# time window and with the specified sampling rate
mds_tree = mds.Tree('pci', shot=shot, mode='ReadOnly')
self.Fs, self._downsample = self._getSampleRate(mds_tree, Fs=Fs)
if shot != -1:
self.t0, self.x = self._getSignal(mds_tree, tlim=tlim)
def _checkChannel(self, channels_per_board=8):
'Ensure that `self.channel` corresponds to a physical mitpci channel.'
if (self.channel <= 0) or (self.channel > (2 * channels_per_board)):
raise ValueError(
'Valid `channel` values between 1 and %i' %
(2 * channels_per_board))
return
def _getDigitizerBoard(self, channels_per_board=8):
'Get digitizer board corresponding to `self.channel`.'
if self.channel <= channels_per_board:
return 'DT216_7'
else:
return 'DT216_8'
def _getNodeName(self, channels_per_board=8):
'Get node name corresponding to signal digitized in `self.channel`.'
board_channel = 1 + ((self.channel - 1) % channels_per_board)
node_name = '.HARDWARE:%s:INPUT_%s' % (self._digitizer_board,
str(board_channel).zfill(2))
return node_name
def _getSampleRate(self, mds_tree, Fs=None):
'Get signal sampling rate, including effects of desired downsampling.'
node = mds_tree.getNode(
'.HARDWARE:%s:CLOCK_DIV' % self._digitizer_board)
digitizer_rate = np.float(node.data())
# Downsample if desired
if Fs is not None:
if Fs > 0:
downsample = np.int(np.floor(digitizer_rate / Fs))
else:
raise ValueError('`Fs` must be positive')
else:
downsample = 1
return (digitizer_rate / downsample, downsample)
def _getSlice(self, x, tlim=None, t0_dig=0.):
'Get slice for signal retrieval between `tlim` at rate `self.Fs`.'
# Minimum and maximum values for slicing `x`
imin = 0
imax = len(x)
if tlim is not None:
# Ensure limits in time are correctly sized and sorted
if len(tlim) != 2:
raise ValueError('`tlim` must be an iterable of length 2.')
else:
tlim = np.sort(tlim)
# Digitization rate
Fs_dig = self.Fs * self._downsample
# Find slicing indices such that:
# (a) `x[ilo:ihi]` corresponds to the signal within `tlim`, and
# (b) `ilo` and `ihi` are bounded by `imin` and `imax`
#
ilo = np.max([
imin,
np.ceil((tlim[0] - t0_dig) * Fs_dig)])
# If `ihi_exact` is an integer, then `tlim[1]`
# sits *exactly* on a digitized point; to ensure
# we include this point in our slice, we should
# augment `ihi_exact` by +1. If `ihi_exact` is
# *not* an integer, the ceiling operation takes
# care of this concern for us.
ihi_exact = (tlim[1] - t0_dig) * Fs_dig
if ihi_exact != np.int(ihi_exact):
ihi = np.min([imax, np.ceil(ihi_exact)])
else:
ihi = np.min([imax, ihi_exact + 1])
# Ensure indices are of integer type (rather than float)
# to avoid deprecation warnings
ilo = np.int(ilo)
ihi = np.int(ihi)
else:
ilo = imin
ihi = imax
return slice(ilo, ihi, self._downsample)
def _getSignal(self, mds_tree, tlim=None):
'Get signal between `tlim` at sampling rate `self.Fs`.'
# Retrieve full raw signal
node = mds_tree.getNode(self._node_name)
x = node.raw_of().data()
# Determine time at the beginning of digitization record
t0_dig = mds_tree.getNode(
'.HARDWARE:%s:DI3' % self._digitizer_board).data()
# Slice in time, if desired
if (tlim is not None) or (self._downsample > 1):
sl = self._getSlice(x, tlim=tlim, t0_dig=t0_dig)
x = x[sl]
t0 = t0_dig + (sl.start / (self.Fs * self._downsample))
else:
t0 = t0_dig
return t0, x
def t(self):
'Get times for points in `self.x`.'
return self.t0 + (np.arange(self.x.shape[-1]) / self.Fs)
@property
def volts_per_bit(self, Vpp_max=8.):
'''Get the volts per bit of retrieved signal.
Parameters:
-----------
Vpp_max - float
The maximum peak-to-peak voltage capable of being digitized
on `self.channel` during `self.shot`.
[Vpp_range] = V
Returns:
--------
volts_per_bit - float
Conversion factor from bits to volts for digitized signal.
[volts_per_bit] = V / bit
Note: The mitpci tree stores the bits-to-volts conversion factor
for a given *board*. However, such an implementation fails
if the voltage range varies from channel-to-channel on a given board.
(For example, we may decrease the voltage range on the heterodyne
interferometer channels to use more of the digitizer's dynamic range).
The `volts_per_bit` property is a midway point between the
current tree structure and a future tree structure that supports
channel-to-channel variation on a single board.
'''
# The mitpci system uses a "16 bit" digitizer;
# (Note, however, that the two least significant bits
# *cannot* be changed, so the minimum spacing between
# non-equal digitized values is 4 bits. As this is a
# quirk of the least significant bits, it does *not*
# influence the conversion from bits to volts).
bits = 16
# Avoid integer division
return np.float(Vpp_max) / (2 ** bits)
| gpl-2.0 | 5,129,600,131,630,516,000 | 38.423077 | 78 | 0.587805 | false | 3.954984 | false | false | false | 0 |
asmundg/coherence | coherence/backends/picasa_storage.py | 3 | 8160 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2009, Jean-Michel Sizun
# Copyright 2009 Frank Scholz <coherence@beebits.net>
import os.path
import time
from twisted.internet import threads
from twisted.web import server, static
from twisted.web.error import PageRedirect
from coherence.upnp.core.utils import ReverseProxyUriResource
from twisted.internet import task
from coherence.upnp.core import utils
from coherence.upnp.core import DIDLLite
from coherence.backend import BackendStore, BackendItem, Container, LazyContainer, \
AbstractBackendStore
from coherence import log
from urlparse import urlsplit
import gdata.photos.service
import gdata.media
import gdata.geo
class PicasaProxy(ReverseProxyUriResource):
    """Reverse proxy serving a single Picasa-hosted resource.

    Strips the ``referer`` header from incoming requests before
    forwarding them, because Picasa may refuse hot-linked requests
    that carry a foreign referer.
    """

    def __init__(self, uri):
        ReverseProxyUriResource.__init__(self, uri)

    def render(self, request):
        # Drop the referer header (if present) in a single lookup.
        # NOTE(review): assumes request.received_headers is a plain
        # dict, as it is in the twisted.web versions this targets.
        request.received_headers.pop('referer', None)
        return ReverseProxyUriResource.render(self, request)
class PicasaPhotoItem(BackendItem):
    """Backend item wrapping a single photo entry from the Picasa feed.

    Exposes the photo's metadata (name, mimetype, description) and a
    :class:`PicasaProxy` location so the content is served through the
    local media server instead of linking to Picasa directly.
    """

    def __init__(self, photo):
        """Initialize from a gdata photo entry.

        :param photo: gdata photo entry with ``summary``, ``title``,
            ``content`` and ``media.thumbnail`` attributes.
        """
        self.photo = photo

        # Prefer the photo's summary as display name; fall back to title.
        self.name = photo.summary.text
        if self.name is None:
            self.name = photo.title.text

        self.duration = None
        self.size = None
        self.mimetype = photo.content.type
        self.description = photo.summary.text
        self.date = None
        self.item = None  # lazily-built DIDLLite item, see get_item()

        self.photo_url = photo.content.src
        self.thumbnail_url = photo.media.thumbnail[0].url
        self.url = None

        self.location = PicasaProxy(self.photo_url)

    def replace_by(self, item):
        """Update this item in place from another PicasaPhotoItem.

        Used when a refreshed feed returns a new entry for the same
        photo; metadata and proxy location are refreshed from it.

        :param item: the PicasaPhotoItem carrying the new photo entry.
        :returns: True (replacement always succeeds).
        """
        self.photo = item.photo
        # Fix: the original read the undefined bare name ``photo`` here,
        # raising NameError; read from the freshly-assigned self.photo.
        self.name = self.photo.summary.text
        if self.name is None:
            self.name = self.photo.title.text
        self.mimetype = self.photo.content.type
        self.description = self.photo.summary.text
        self.photo_url = self.photo.content.src
        self.thumbnail_url = self.photo.media.thumbnail[0].url
        self.location = PicasaProxy(self.photo_url)
        return True

    def get_item(self):
        """Return (building lazily on first call) the DIDL-Lite item."""
        if self.item == None:
            upnp_id = self.get_id()
            upnp_parent_id = self.parent.get_id()
            self.item = DIDLLite.Photo(upnp_id, upnp_parent_id, self.name)
            res = DIDLLite.Resource(self.url, 'http-get:*:%s:*' % self.mimetype)
            self.item.res.append(res)
            self.item.childCount = 0
        return self.item

    def get_path(self):
        """Return the URL under which this photo is served."""
        return self.url

    def get_id(self):
        """Return the storage id assigned by the backend store."""
        return self.storage_id
class PicasaStore(AbstractBackendStore):
logCategory = 'picasa_store'
implements = ['MediaServer']
description = ('Picasa Web Albums', 'connects to the Picasa Web Albums service and exposes the featured photos and albums for a given user.', None)
options = [{'option':'name', 'text':'Server Name:', 'type':'string','default':'my media','help': 'the name under this MediaServer shall show up with on other UPnP clients'},
{'option':'version','text':'UPnP Version:','type':'int','default':2,'enum': (2,1),'help': 'the highest UPnP version this MediaServer shall support','level':'advance'},
{'option':'uuid','text':'UUID Identifier:','type':'string','help':'the unique (UPnP) identifier for this MediaServer, usually automatically set','level':'advance'},
{'option':'refresh','text':'Refresh period','type':'string'},
{'option':'login','text':'User ID:','type':'string','group':'User Account'},
{'option':'password','text':'Password:','type':'string','group':'User Account'},
]
def __init__(self, server, **kwargs):
AbstractBackendStore.__init__(self, server, **kwargs)
self.name = kwargs.get('name','Picasa Web Albums')
self.refresh = int(kwargs.get('refresh',60))*60
self.login = kwargs.get('userid',kwargs.get('login',''))
self.password = kwargs.get('password','')
rootContainer = Container(None, self.name)
self.set_root_item(rootContainer)
self.AlbumsContainer = LazyContainer(rootContainer, 'My Albums', None, self.refresh, self.retrieveAlbums)
rootContainer.add_child(self.AlbumsContainer)
self.FeaturedContainer = LazyContainer(rootContainer, 'Featured photos', None, self.refresh, self.retrieveFeaturedPhotos)
rootContainer.add_child(self.FeaturedContainer)
self.init_completed()
def __repr__(self):
return self.__class__.__name__
def upnp_init(self):
self.current_connection_id = None
if self.server:
self.server.connection_manager_server.set_variable(0, 'SourceProtocolInfo',
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_TN;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_SM;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_MED;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:DLNA.ORG_PN=JPEG_LRG;DLNA.ORG_OP=01;DLNA.ORG_FLAGS=00f00000000000000000000000000000,'
'http-get:*:image/jpeg:*,'
'http-get:*:image/gif:*,'
'http-get:*:image/png:*',
default=True)
self.wmc_mapping = {'16': self.get_root_id()}
self.gd_client = gdata.photos.service.PhotosService()
self.gd_client.email = self.login
self.gd_client.password = self.password
self.gd_client.source = 'Coherence UPnP backend'
if len(self.login) > 0:
d = threads.deferToThread(self.gd_client.ProgrammaticLogin)
def retrieveAlbums(self, parent=None):
albums = threads.deferToThread(self.gd_client.GetUserFeed)
def gotAlbums(albums):
if albums is None:
print "Unable to retrieve albums"
return
for album in albums.entry:
title = album.title.text
album_id = album.gphoto_id.text
item = LazyContainer(parent, title, album_id, self.refresh, self.retrieveAlbumPhotos, album_id=album_id)
parent.add_child(item, external_id=album_id)
def gotError(error):
print "ERROR: %s" % error
albums.addCallbacks(gotAlbums, gotError)
return albums
def retrieveFeedPhotos (self, parent=None, feed_uri=''):
#print feed_uri
photos = threads.deferToThread(self.gd_client.GetFeed, feed_uri)
def gotPhotos(photos):
if photos is None:
print "Unable to retrieve photos for feed %s" % feed_uri
return
for photo in photos.entry:
photo_id = photo.gphoto_id.text
item = PicasaPhotoItem(photo)
item.parent = parent
parent.add_child(item, external_id=photo_id)
def gotError(error):
print "ERROR: %s" % error
photos.addCallbacks(gotPhotos, gotError)
return photos
def retrieveAlbumPhotos (self, parent=None, album_id=''):
album_feed_uri = '/data/feed/api/user/%s/albumid/%s?kind=photo' % (self.login, album_id)
return self.retrieveFeedPhotos(parent, album_feed_uri)
def retrieveFeaturedPhotos (self, parent=None):
feed_uri = 'http://picasaweb.google.com/data/feed/api/featured'
return self.retrieveFeedPhotos(parent, feed_uri)
| mit | -1,526,840,385,591,570,200 | 37.42029 | 177 | 0.592279 | false | 3.961165 | false | false | false | 0.013358 |
Wingless-Archangel/OWASP-ZSC | lib/encoder/windows_x86/add_random.py | 4 | 2175 | #!/usr/bin/env python
'''
OWASP ZSC
https://www.owasp.org/index.php/OWASP_ZSC_Tool_Project
https://github.com/zscproject/OWASP-ZSC
http://api.z3r0d4y.com/
https://groups.google.com/d/forum/owasp-zsc [ owasp-zsc[at]googlegroups[dot]com ]
'''
import random
import binascii
import string
from core.compatible import version
_version = version()
chars = string.digits + string.ascii_letters
def start(shellcode, job):
for line in shellcode.rsplit('\n'):
if 'push' in line and '$0x' in line and ',' not in line and len(
line) > 14:
data = line.rsplit('push')[1].rsplit('$0x')[1]
t = True
while t:
if _version is 2:
ebx_1 = binascii.b2a_hex(''.join(random.choice(chars)
for i in range(4)))
if _version is 3:
ebx_1 = (binascii.b2a_hex((''.join(random.choice(
chars) for i in range(4))).encode('latin-1'))
).decode('latin-1')
ebx_2 = "%x" % (int(data, 16) - int(ebx_1, 16))
if str('00') not in str(ebx_1) and str('00') not in str(
ebx_2) and '-' in ebx_2 and len(ebx_2.replace(
'-', '')) >= 7 and len(
ebx_1) > 7 and '-' not in ebx_1:
ebx_2 = ebx_2.replace('-', '')
if job == 'exec' or job == 'add_admin' or job == 'dir_create' or job == 'download_exec':
command = '\npush %%ebx\npush $0x%s\npop %%ebx\npush $0x%s\npop %%ecx\nneg %%ecx\nadd %%ebx,%%ecx\npop %%ebx\npush %%ecx\n' % (
str(ebx_1), str(ebx_2))
elif job == 'create_file' or job == 'disable_firewall' or job == 'download_tofile':
command = '\npush %%eax\npush $0x%s\npop %%eax\npush $0x%s\npop %%ecx\nneg %%ecx\nadd %%eax,%%ecx\npop %%eax\npush %%ecx\n' % (
str(ebx_1), str(ebx_2))
shellcode = shellcode.replace(line, command)
t = False
return shellcode
| gpl-3.0 | -4,637,633,731,184,307,000 | 47.333333 | 151 | 0.484138 | false | 3.275602 | false | false | false | 0.002299 |
WeblateOrg/weblate | weblate/billing/migrations/0001_squashed_0020_auto_20200320_1007.py | 2 | 7079 | # Generated by Django 3.0.5 on 2020-04-16 11:35
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import weblate.utils.fields
class Migration(migrations.Migration):
replaces = [
("billing", "0001_squashed_0016_remove_billing_user"),
("billing", "0002_auto_20180905_1400"),
("billing", "0003_billing_owners"),
("billing", "0004_auto_20181021_1249"),
("billing", "0005_auto_20181021_1254"),
("billing", "0006_auto_20181021_1256"),
("billing", "0007_plan_public"),
("billing", "0008_auto_20181024_1151"),
("billing", "0009_auto_20181101_0900"),
("billing", "0010_invoice_amount"),
("billing", "0011_billing_grace_period"),
("billing", "0012_auto_20181207_0843"),
("billing", "0013_auto_20190208_1452"),
("billing", "0014_billing_removal"),
("billing", "0015_auto_20190516_1159"),
("billing", "0016_auto_20190911_1316"),
("billing", "0017_auto_20190919_1101"),
("billing", "0018_plan_slug"),
("billing", "0019_slugify"),
("billing", "0020_auto_20200320_1007"),
]
initial = True
dependencies = [
("trans", "0001_squashed_0143_auto_20180609_1655"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Plan",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=100, unique=True)),
("price", models.IntegerField(default=0)),
("limit_strings", models.IntegerField(default=0)),
("limit_languages", models.IntegerField(default=0)),
("limit_projects", models.IntegerField(default=0)),
("yearly_price", models.IntegerField(default=0)),
("display_limit_languages", models.IntegerField(default=0)),
("display_limit_projects", models.IntegerField(default=0)),
("display_limit_strings", models.IntegerField(default=0)),
("change_access_control", models.BooleanField(default=True)),
("public", models.BooleanField(default=False)),
("slug", models.SlugField(max_length=100, unique=True)),
],
options={},
),
migrations.CreateModel(
name="Billing",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"plan",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="billing.Plan",
verbose_name="Billing plan",
),
),
(
"projects",
models.ManyToManyField(
blank=True, to="trans.Project", verbose_name="Billed projects"
),
),
(
"state",
models.IntegerField(
choices=[
(0, "Active"),
(1, "Trial"),
(2, "Expired"),
(3, "Terminated"),
],
default=0,
verbose_name="Billing state",
),
),
(
"in_limits",
models.BooleanField(
default=True, editable=False, verbose_name="In limits"
),
),
(
"paid",
models.BooleanField(
default=True, editable=False, verbose_name="Paid"
),
),
(
"expiry",
models.DateTimeField(
blank=True,
default=None,
help_text="After expiry removal with 15 days grace period is scheduled.",
null=True,
verbose_name="Trial expiry date",
),
),
(
"owners",
models.ManyToManyField(
blank=True,
to=settings.AUTH_USER_MODEL,
verbose_name="Billing owners",
),
),
("payment", weblate.utils.fields.JSONField(default={}, editable=False)),
(
"grace_period",
models.IntegerField(
default=0, verbose_name="Grace period for payments"
),
),
(
"removal",
models.DateTimeField(
blank=True,
default=None,
help_text="This is automatically set after trial expiry.",
null=True,
verbose_name="Scheduled removal",
),
),
],
),
migrations.CreateModel(
name="Invoice",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("start", models.DateField()),
("end", models.DateField()),
("payment", weblate.utils.fields.JSONField(default={}, editable=False)),
(
"currency",
models.IntegerField(
choices=[(0, "EUR"), (1, "mBTC"), (2, "USD"), (3, "CZK")],
default=0,
),
),
("ref", models.CharField(blank=True, max_length=50)),
("note", models.TextField(blank=True)),
(
"billing",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="billing.Billing",
),
),
("amount", models.FloatField()),
],
options={},
),
]
| gpl-3.0 | -87,137,880,979,684,830 | 35.678756 | 97 | 0.40161 | false | 5.29469 | false | false | false | 0.000848 |
jeremiahyan/odoo | addons/auth_signup/models/res_users.py | 1 | 11459 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from ast import literal_eval
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.tools.misc import ustr
from odoo.addons.base.models.ir_mail_server import MailDeliveryException
from odoo.addons.auth_signup.models.res_partner import SignupError, now
_logger = logging.getLogger(__name__)
class ResUsers(models.Model):
_inherit = 'res.users'
state = fields.Selection(compute='_compute_state', search='_search_state', string='Status',
selection=[('new', 'Never Connected'), ('active', 'Confirmed')])
def _search_state(self, operator, value):
negative = operator in expression.NEGATIVE_TERM_OPERATORS
# In case we have no value
if not value:
return expression.TRUE_DOMAIN if negative else expression.FALSE_DOMAIN
if operator in ['in', 'not in']:
if len(value) > 1:
return expression.FALSE_DOMAIN if negative else expression.TRUE_DOMAIN
if value[0] == 'new':
comp = '!=' if negative else '='
if value[0] == 'active':
comp = '=' if negative else '!='
return [('log_ids', comp, False)]
if operator in ['=', '!=']:
# In case we search against anything else than new, we have to invert the operator
if value != 'new':
operator = expression.TERM_OPERATORS_NEGATION[operator]
return [('log_ids', operator, False)]
return expression.TRUE_DOMAIN
def _compute_state(self):
for user in self:
user.state = 'active' if user.login_date else 'new'
@api.model
def signup(self, values, token=None):
""" signup a user, to either:
- create a new user (no token), or
- create a user for a partner (with token, but no user for partner), or
- change the password of a user (with token, and existing user).
:param values: a dictionary with field values that are written on user
:param token: signup token (optional)
:return: (dbname, login, password) for the signed up user
"""
if token:
# signup with a token: find the corresponding partner id
partner = self.env['res.partner']._signup_retrieve_partner(token, check_validity=True, raise_exception=True)
# invalidate signup token
partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
partner_user = partner.user_ids and partner.user_ids[0] or False
# avoid overwriting existing (presumably correct) values with geolocation data
if partner.country_id or partner.zip or partner.city:
values.pop('city', None)
values.pop('country_id', None)
if partner.lang:
values.pop('lang', None)
if partner_user:
# user exists, modify it according to values
values.pop('login', None)
values.pop('name', None)
partner_user.write(values)
if not partner_user.login_date:
partner_user._notify_inviter()
return (self.env.cr.dbname, partner_user.login, values.get('password'))
else:
# user does not exist: sign up invited user
values.update({
'name': partner.name,
'partner_id': partner.id,
'email': values.get('email') or values.get('login'),
})
if partner.company_id:
values['company_id'] = partner.company_id.id
values['company_ids'] = [(6, 0, [partner.company_id.id])]
partner_user = self._signup_create_user(values)
partner_user._notify_inviter()
else:
# no token, sign up an external user
values['email'] = values.get('email') or values.get('login')
self._signup_create_user(values)
return (self.env.cr.dbname, values.get('login'), values.get('password'))
@api.model
def _get_signup_invitation_scope(self):
return self.env['ir.config_parameter'].sudo().get_param('auth_signup.invitation_scope', 'b2b')
@api.model
def _signup_create_user(self, values):
""" signup a new user using the template user """
# check that uninvited users may sign up
if 'partner_id' not in values:
if self._get_signup_invitation_scope() != 'b2c':
raise SignupError(_('Signup is not allowed for uninvited users'))
return self._create_user_from_template(values)
def _notify_inviter(self):
for user in self:
invite_partner = user.create_uid.partner_id
if invite_partner:
# notify invite user that new user is connected
title = _("%s connected", user.name)
message = _("This is their first connection. Wish them luck.")
self.env['bus.bus'].sendone(
(self._cr.dbname, 'res.partner', invite_partner.id),
{'type': 'user_connection', 'title': title,
'message': message, 'partner_id': user.partner_id.id}
)
def _create_user_from_template(self, values):
template_user_id = literal_eval(self.env['ir.config_parameter'].sudo().get_param('base.template_portal_user_id', 'False'))
template_user = self.browse(template_user_id)
if not template_user.exists():
raise ValueError(_('Signup: invalid template user'))
if not values.get('login'):
raise ValueError(_('Signup: no login given for new user'))
if not values.get('partner_id') and not values.get('name'):
raise ValueError(_('Signup: no name or partner given for new user'))
# create a copy of the template user (attached to a specific partner_id if given)
values['active'] = True
try:
with self.env.cr.savepoint():
return template_user.with_context(no_reset_password=True).copy(values)
except Exception as e:
# copy may failed if asked login is not available.
raise SignupError(ustr(e))
def reset_password(self, login):
""" retrieve the user corresponding to login (login or email),
and reset their password
"""
users = self.search([('login', '=', login)])
if not users:
users = self.search([('email', '=', login)])
if len(users) != 1:
raise Exception(_('Reset password: invalid username or email'))
return users.action_reset_password()
def action_reset_password(self):
""" create signup token for each user, and send their signup url by email """
if self.env.context.get('install_mode', False):
return
if self.filtered(lambda user: not user.active):
raise UserError(_("You cannot perform this action on an archived user."))
# prepare reset password signup
create_mode = bool(self.env.context.get('create_user'))
# no time limit for initial invitation, only for reset password
expiration = False if create_mode else now(days=+1)
self.mapped('partner_id').signup_prepare(signup_type="reset", expiration=expiration)
# send email to users with their signup url
template = False
if create_mode:
try:
template = self.env.ref('auth_signup.set_password_email', raise_if_not_found=False)
except ValueError:
pass
if not template:
template = self.env.ref('auth_signup.reset_password_email')
assert template._name == 'mail.template'
email_values = {
'email_to': '${object.email|safe}',
'email_cc': False,
'auto_delete': True,
'recipient_ids': [],
'partner_ids': [],
'scheduled_date': False,
}
for user in self:
if not user.email:
raise UserError(_("Cannot send email: user %s has no email address.", user.name))
# TDE FIXME: make this template technical (qweb)
with self.env.cr.savepoint():
force_send = not(self.env.context.get('import_file', False))
template.send_mail(user.id, force_send=force_send, raise_exception=True, email_values=email_values)
_logger.info("Password reset email sent for user <%s> to <%s>", user.login, user.email)
def send_unregistered_user_reminder(self, after_days=5):
datetime_min = fields.Datetime.today() - relativedelta(days=after_days)
datetime_max = datetime_min + relativedelta(hours=23, minutes=59, seconds=59)
res_users_with_details = self.env['res.users'].search_read([
('share', '=', False),
('create_uid.email', '!=', False),
('create_date', '>=', datetime_min),
('create_date', '<=', datetime_max),
('log_ids', '=', False)], ['create_uid', 'name', 'login'])
# group by invited by
invited_users = defaultdict(list)
for user in res_users_with_details:
invited_users[user.get('create_uid')[0]].append("%s (%s)" % (user.get('name'), user.get('login')))
# For sending mail to all the invitors about their invited users
for user in invited_users:
template = self.env.ref('auth_signup.mail_template_data_unregistered_users').with_context(dbname=self._cr.dbname, invited_users=invited_users[user])
template.send_mail(user, notif_layout='mail.mail_notification_light', force_send=False)
@api.model
def web_create_users(self, emails):
inactive_users = self.search([('state', '=', 'new'), '|', ('login', 'in', emails), ('email', 'in', emails)])
new_emails = set(emails) - set(inactive_users.mapped('email'))
res = super(ResUsers, self).web_create_users(list(new_emails))
if inactive_users:
inactive_users.with_context(create_user=True).action_reset_password()
return res
@api.model_create_multi
def create(self, vals_list):
# overridden to automatically invite user to sign up
users = super(ResUsers, self).create(vals_list)
if not self.env.context.get('no_reset_password'):
users_with_email = users.filtered('email')
if users_with_email:
try:
users_with_email.with_context(create_user=True).action_reset_password()
except MailDeliveryException:
users_with_email.partner_id.with_context(create_user=True).signup_cancel()
return users
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
self.ensure_one()
sup = super(ResUsers, self)
if not default or not default.get('email'):
# avoid sending email to the user we are duplicating
sup = super(ResUsers, self.with_context(no_reset_password=True))
return sup.copy(default=default)
| gpl-3.0 | 963,764,320,146,704,600 | 43.414729 | 160 | 0.590715 | false | 4.163881 | false | false | false | 0.003142 |
hgiemza/DIRAC | Core/Workflow/WorkflowReader.py | 5 | 4858 | """
This is a comment
"""
#try: # this part to import as part of the DIRAC framework
import xml.sax
from xml.sax.handler import ContentHandler
from DIRAC.Core.Workflow.Parameter import *
from DIRAC.Core.Workflow.Module import *
from DIRAC.Core.Workflow.Step import *
from DIRAC.Core.Workflow.Workflow import Workflow
__RCSID__ = "$Id$"
class WorkflowXMLHandler(ContentHandler):
def __init__(self, new_wf=None):
""" If new_wf defined, it will be used as root of document """
# this is an attribute for the object to be created from the XML document
self.root=new_wf # the reference on the all document
self.stack=None # to keep last object
self.strings=None # to accumulate string object (list of strings) used to split long string
def startDocument(self):
#reset the process
#self.root=None
self.stack=[]
self.strings=[]
def endDocument(self):
pass
def startElement(self, name, attrs):
#print name ,"startElement", "attr=", attrs.getLength(), attrs.getNames()
self.clearCharacters() # clear to remove empty or nonprintable characters
if name == "Workflow":
if self.root == None: #if root not defined by constractor
self.root = Workflow()
self.stack.append(self.root)
elif name == "StepDefinition":
obj = StepDefinition("TemporaryXMLObject_StepDefinition")
if self.root == None: # in case we are saving Step only
self.root = obj
self.stack.append(obj)
elif name == "StepInstance":
obj = StepInstance("TemporaryXMLObject_StepInstance")
self.stack.append(obj)
elif name == "ModuleDefinition":
obj = ModuleDefinition("TemporaryXMLObject_ModuleDefinition")
if self.root == None: # in case we are saving Module only
self.root = obj
self.stack.append(obj)
elif name == "ModuleInstance":
obj = ModuleInstance("TemporaryXMLObject_ModuleInstance")
self.stack.append(obj)
elif name == "Parameter":
obj = Parameter(str(attrs['name']), None, str(attrs['type']), str(attrs['linked_module']), str(attrs['linked_parameter']), str(attrs['in']), str(attrs['out']), str(attrs['description']))
self.stack.append(obj)
# TEMPORARY CODE
elif name=="origin" or name == "version" or name == "name" or name == "type" or name == "value" or\
name == "required" or name == "descr_short" or name == "name" or name == "type" or name == "description" or name == "body":
pass
else:
print "UNTREATED! startElement name=", name, "attr=", attrs.getLength(), attrs.getNames()
pass
def endElement(self, name):
#print name, "endElement"
# attributes
if name=="origin":
self.stack[len(self.stack)-1].setOrigin(self.getCharacters())
elif name == "version":
self.stack[len(self.stack)-1].setVersion(self.getCharacters())
elif name == "name":
self.stack[len(self.stack)-1].setName(self.getCharacters())
elif name == "type":
self.stack[len(self.stack)-1].setType(self.getCharacters())
elif name == "required":
self.stack[len(self.stack)-1].setRequired(self.getCharacters())
elif name == "descr_short":
self.stack[len(self.stack)-1].setDescrShort(self.getCharacters())
elif name == "name":
self.stack[len(self.stack)-1].setName(self.getCharacters())
elif name == "type":
self.stack[len(self.stack)-1].setType(self.getCharacters())
elif name == "description":
self.stack[len(self.stack)-1].setDescription(self.getCharacters())
elif name == "body":
self.stack[len(self.stack)-1].setBody(self.getCharacters())
elif name == "value":
ch = self.getCharacters()
# to keep compatibility with the old version
# were """ was not used for the string
if self.stack[len(self.stack)-1].isTypeString():
self.stack[len(self.stack)-1].setValue(ch)
else:
self.stack[len(self.stack)-1].setValue(eval(ch))
#objects
elif name=="Workflow":
self.stack.pop()
elif name == "StepDefinition":
self.root.step_definitions.append(self.stack.pop())
elif name == "StepInstance":
self.root.step_instances.append(self.stack.pop())
elif name == "ModuleDefinition":
self.root.addModule(self.stack.pop())
elif name == "ModuleInstance":
obj=self.stack.pop()
self.stack[len(self.stack)-1].module_instances.append(obj)
elif name == "Parameter":
obj=self.stack.pop();
self.stack[len(self.stack)-1].addParameter(obj)
else:
print "UNTREATED! endElement", name
def getCharacters(self):
# combine all strings and clear the list
ret = ''.join(self.strings)
self.clearCharacters()
return str(ret)
def clearCharacters(self):
del self.strings
self.strings=[]
def characters(self, content):
self.strings.append(content)
| gpl-3.0 | 8,925,888,873,492,360,000 | 33.94964 | 192 | 0.656237 | false | 3.728319 | false | false | false | 0.01832 |
LUTAN/tensorflow | tensorflow/contrib/losses/python/losses/loss_ops.py | 30 | 27668 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = ["absolute_difference",
"add_loss",
"cosine_distance",
"compute_weighted_loss",
"get_losses",
"get_regularization_losses",
"get_total_loss",
"hinge_loss",
"log_loss",
"mean_pairwise_squared_error",
"mean_squared_error",
"sigmoid_cross_entropy",
"softmax_cross_entropy",
"sparse_softmax_cross_entropy"]
def _scale_losses(losses, weights):
  """Computes the weighted sum of `losses`.

  The trailing dimensions of `losses` are summed away until its rank matches
  that of `weights`; the partial sums are then multiplied element-wise by
  `weights` and reduced to a single scalar. Conceptually this is the same as
  broadcasting (tiling) `weights` up to the shape of `losses`, multiplying
  element-wise, and summing the result.

  Args:
    losses: A `Tensor` of size [batch_size, d1, ... dN].
    weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].

  Returns:
    A scalar tf.float32 `Tensor` holding the sum of the scaled `losses`.
  """
  # Sum over every axis of `losses` beyond the rank of `weights` so the two
  # tensors line up for the element-wise product below.
  weights_rank = max(0, weights.get_shape().ndims)
  trailing_axes = list(range(weights_rank, losses.get_shape().ndims))
  partial_sums = math_ops.reduce_sum(losses,
                                     reduction_indices=trailing_axes)
  return math_ops.reduce_sum(math_ops.multiply(partial_sums, weights))
def _safe_div(numerator, denominator, name="value"):
  """Divides element-wise, returning 0 wherever the denominator is zero.

  The denominator is patched to 1 at its zero positions *before* dividing,
  rather than only masking the output afterwards. This extra step matters:
  an actual division by zero would inject NaNs into the gradient computation
  even at positions whose forward value is masked out.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values
      are assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  # Replace zeros with ones so the division itself is always well-defined.
  patched_denominator = array_ops.where(
      math_ops.equal(denominator, 0),
      array_ops.ones_like(denominator), denominator)
  quotient = math_ops.div(numerator, patched_denominator)
  # Mask out the positions that were patched above.
  return array_ops.where(
      math_ops.greater(denominator, 0),
      quotient,
      array_ops.zeros_like(numerator),
      name=name)
def _safe_mean(losses, num_present):
  """Computes the mean of `losses`, returning 0 when `num_present` is zero.

  Args:
    losses: A tensor whose elements contain individual loss measurements.
    num_present: The number of measurable losses in the tensor.

  Returns:
    A scalar representing the mean of the losses, or zero if `num_present`
    is zero.
  """
  # _safe_div handles the num_present == 0 case without producing NaNs.
  return _safe_div(math_ops.reduce_sum(losses), num_present)
@deprecated("2016-12-30", "Use tf.losses.compute_weighted_loss instead.")
def compute_weighted_loss(losses, weights=1.0, scope=None):
  """Computes the weighted mean loss and registers it via `add_loss`.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    scope: the scope for the operations performed in computing the loss.

  Returns:
    A scalar `Tensor` of the same dtype as `losses` holding the weighted loss.

  Raises:
    ValueError: If `weights` is `None` or the shape is not compatible with
      `losses`, or if the number of dimensions (rank) of either `losses` or
      `weights` is missing.
  """
  with ops.name_scope(scope, "weighted_loss", [losses, weights]):
    losses = ops.convert_to_tensor(losses)
    original_dtype = losses.dtype
    # All intermediate arithmetic is carried out in float32.
    losses = math_ops.to_float(losses)
    weights = math_ops.to_float(ops.convert_to_tensor(weights))

    if losses.get_shape().ndims is None:
      raise ValueError("losses.get_shape().ndims cannot be None")
    if weights.get_shape().ndims is None:
      raise ValueError("weights.get_shape().ndims cannot be None")

    # Drop a trailing singleton dimension on `weights` so it broadcasts
    # against `losses` as documented.
    w_shape = weights.get_shape()
    if w_shape.ndims > 1 and w_shape.dims[-1].is_compatible_with(1):
      weights = array_ops.squeeze(weights, [-1])

    weighted_sum = _scale_losses(losses, weights)
    num_present = _num_present(losses, weights)
    mean_loss = _safe_mean(weighted_sum, num_present)
    # Convert the result back to the caller's original dtype.
    mean_loss = math_ops.cast(mean_loss, original_dtype)
    add_loss(mean_loss)
    return mean_loss
def _num_present(losses, weights, per_batch=False):
  """Computes the number of elements in the loss function induced by `weights`.

  A given weights tensor induces different numbers of usable elements in the
  `losses` tensor. The `weights` tensor is broadcast across `losses` for all
  possible dimensions. For example, if `losses` is a tensor of dimension
  [4, 5, 6, 3] and `weights` is a tensor of size [4, 5], then `weights` is, in
  effect, tiled to match the size of `losses`. Following this effective tile,
  the total number of present elements is the number of non-zero weights.

  Args:
    losses: A tensor of size [batch_size, d1, ... dN].
    weights: A tensor of size [1] or [batch_size, d1, ... dK] where K < N.
    per_batch: Whether to return the number of elements per batch or as a sum
      total.

  Returns:
    The number of present (non-zero) elements in the losses tensor. If
    `per_batch` is True, the value is returned as a tensor of size
    [batch_size]. Otherwise, a single scalar tensor is returned.
  """
  # If weights is a scalar, its easy to compute:
  if weights.get_shape().ndims == 0:
    # Extract the leading (batch) dimension of `losses` as a scalar tensor.
    batch_size = array_ops.reshape(array_ops.slice(array_ops.shape(losses),
                                                   [0], [1]), [])
    # Elements per batch entry = total element count / batch size.
    num_per_batch = math_ops.div(math_ops.to_float(array_ops.size(losses)),
                                 math_ops.to_float(batch_size))
    # A scalar weight of exactly 0 masks out every element.
    num_per_batch = array_ops.where(math_ops.equal(weights, 0),
                                    0.0, num_per_batch)
    # Tile the scalar count into a [batch_size] vector so the per_batch
    # return shape matches the non-scalar branch below.
    num_per_batch = math_ops.multiply(array_ops.ones(
        array_ops.reshape(batch_size, [1])), num_per_batch)
    return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)

  # First, count the number of nonzero weights:
  # NOTE(review): if the rank of `weights` is unknown (ndims is None) this
  # comparison either skips the branch (Python 2) or raises (Python 3), and
  # `num_nonzero_per_batch` below would be unbound. compute_weighted_loss
  # rejects rank-None weights before calling here — confirm other callers do
  # the same.
  if weights.get_shape().ndims >= 1:
    # Sum the 0/1 non-zero indicator over all non-batch axes of `weights`.
    reduction_indices = list(range(1, weights.get_shape().ndims))
    num_nonzero_per_batch = math_ops.reduce_sum(
        math_ops.to_float(math_ops.not_equal(weights, 0)),
        reduction_indices=reduction_indices)

  # Next, determine the number of elements that weights would broadcast to:
  broadcast_dims = array_ops.slice(array_ops.shape(losses),
                                   [weights.get_shape().ndims], [-1])
  num_to_broadcast = math_ops.to_float(math_ops.reduce_prod(broadcast_dims))

  # Each non-zero weight accounts for `num_to_broadcast` loss elements.
  num_per_batch = math_ops.multiply(num_nonzero_per_batch, num_to_broadcast)
  return num_per_batch if per_batch else math_ops.reduce_sum(num_per_batch)
@deprecated("2016-12-30", "Use tf.losses.add_loss instead.")
@add_arg_scope
def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
"""Adds a externally defined loss to the collection of losses.
Args:
loss: A loss `Tensor`.
loss_collection: Optional collection to add the loss to.
"""
if loss_collection:
ops.add_to_collection(loss_collection, loss)
@deprecated("2016-12-30", "Use tf.losses.get_losses instead.")
def get_losses(scope=None, loss_collection=ops.GraphKeys.LOSSES):
"""Gets the list of losses from the loss_collection.
Args:
scope: an optional scope for filtering the losses to return.
loss_collection: Optional losses collection.
Returns:
a list of loss tensors.
"""
return ops.get_collection(loss_collection, scope)
@deprecated("2016-12-30", "Use tf.losses.get_regularization_losses instead.")
def get_regularization_losses(scope=None):
"""Gets the regularization losses.
Args:
scope: an optional scope for filtering the losses to return.
Returns:
A list of loss variables.
"""
return ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES, scope)
@deprecated("2016-12-30", "Use tf.losses.get_total_loss instead.")
def get_total_loss(add_regularization_losses=True, name="total_loss"):
"""Returns a tensor whose value represents the total loss.
Notice that the function adds the given losses to the regularization losses.
Args:
add_regularization_losses: A boolean indicating whether or not to use the
regularization losses in the sum.
name: The name of the returned tensor.
Returns:
A `Tensor` whose value represents the total loss.
Raises:
ValueError: if `losses` is not iterable.
"""
losses = get_losses()
if add_regularization_losses:
losses += get_regularization_losses()
return math_ops.add_n(losses, name=name)
@deprecated("2016-12-30", "Use tf.losses.absolute_difference instead.")
def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "absolute_difference",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.sigmoid_cross_entropy instead. Note that the order "
"of the predictions and labels arguments was changed.")
def sigmoid_cross_entropy(
logits, multi_class_labels, weights=1.0, label_smoothing=0, scope=None):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
logits: [batch_size, num_classes] logits outputs of the network .
multi_class_labels: [batch_size, num_classes] labels in (0, 1).
weights: Coefficients for the loss. The tensor must be a scalar, a tensor of
shape [batch_size] or shape [batch_size, num_classes].
label_smoothing: If greater than 0 then smooth the labels.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None.
"""
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
[logits, multi_class_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.softmax_cross_entropy instead. Note that the order "
"of the logits and labels arguments has been changed.")
def softmax_cross_entropy(
logits, onehot_labels, weights=1.0, label_smoothing=0, scope=None):
"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Args:
logits: [batch_size, num_classes] logits outputs of the network .
onehot_labels: [batch_size, num_classes] one-hot-encoded labels.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size].
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None.
"""
with ops.name_scope(scope, "softmax_cross_entropy_loss",
[logits, onehot_labels, weights]) as scope:
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
losses = nn.softmax_cross_entropy_with_logits(labels=onehot_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.sparse_softmax_cross_entropy instead. Note that "
"the order of the logits and labels arguments has been changed.")
def sparse_softmax_cross_entropy(logits, labels, weights=1.0, scope=None):
"""Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of size [`batch_size`], then the loss weights apply to each
corresponding sample.
Args:
logits: [batch_size, num_classes] logits outputs of the network .
labels: [batch_size, 1] or [batch_size] labels of dtype `int32` or `int64`
in the range `[0, num_classes)`.
weights: Coefficients for the loss. The tensor must be a scalar or a tensor
of shape [batch_size] or [batch_size, 1].
scope: the scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the mean loss value.
Raises:
ValueError: If the shapes of `logits`, `labels`, and `weights` are
incompatible, or if `weights` is None.
"""
with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
[logits, labels, weights]) as scope:
labels = array_ops.reshape(labels, shape=[array_ops.shape(labels)[0]])
losses = nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.log_loss instead. Note that the order of the "
"predictions and labels arguments was changed.")
def log_loss(predictions, labels=None, weights=1.0, epsilon=1e-7, scope=None):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "log_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = -math_ops.multiply(
labels,
math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.hinge_loss instead. Note that the order of the "
"predictions and labels arguments were changed.")
def hinge_loss(logits, labels=None, scope=None):
"""Method that returns the loss tensor for hinge loss.
Args:
logits: The logits, a float tensor.
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0.
scope: The scope for the operations performed in computing the loss.
Returns:
A `Tensor` of same shape as `logits` and `labels` representing the loss
values across the batch.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match.
"""
with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
labels = math_ops.to_float(labels)
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
return nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
@deprecated("2016-12-30", "Use tf.losses.mean_squared_error instead.")
def mean_squared_error(predictions, labels=None, weights=1.0, scope=None):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
predictions: The predicted outputs.
labels: The ground truth output tensor, same dimensions as 'predictions'.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
losses = math_ops.square(math_ops.subtract(predictions, labels))
return compute_weighted_loss(losses, weights, scope=scope)
@deprecated("2016-12-30",
"Use tf.losses.mean_pairwise_squared_error instead. Note that the "
"order of the predictions and labels arguments was changed.")
def mean_pairwise_squared_error(
predictions, labels=None, weights=1.0, scope=None):
"""Adds a pairwise-errors-squared loss to the training procedure.
Unlike `mean_squared_error`, which is a measure of the differences between
corresponding elements of `predictions` and `labels`,
`mean_pairwise_squared_error` is a measure of the differences between pairs of
corresponding elements of `predictions` and `labels`.
For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
three pairs of differences are summed to compute the loss:
loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
Note that since the inputs are of size [batch_size, d0, ... dN], the
corresponding pairs are computed within each batch sample but not across
samples within a batch. For example, if `predictions` represents a batch of
16 grayscale images of dimension [batch_size, 100, 200], then the set of pairs
is drawn from each image, but not across images.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
[batch_size], then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector.
Args:
predictions: The predicted outputs, a tensor of size [batch_size, d0, .. dN]
where N+1 is the total number of dimensions in `predictions`.
labels: The ground truth output tensor, whose shape must match the shape of
the `predictions` tensor.
weights: Coefficients for the loss a scalar, a tensor of shape [batch_size]
or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid.
"""
with ops.name_scope(scope, "mean_pairwise_squared_error",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
weights = math_ops.to_float(ops.convert_to_tensor(weights))
diffs = math_ops.subtract(predictions, labels)
# Need to verify here since the function doesn't use compute_weighted_loss
if diffs.get_shape().ndims is None:
raise ValueError("diffs.get_shape().ndims cannot be None")
if weights.get_shape().ndims is None:
raise ValueError("weights.get_shape().ndims cannot be None")
reduction_indices = list(range(1, diffs.get_shape().ndims))
sum_squares_diff_per_batch = math_ops.reduce_sum(
math_ops.square(diffs),
reduction_indices=reduction_indices)
num_present_per_batch = _num_present(diffs, weights, per_batch=True)
term1 = 2.0 * _safe_div(sum_squares_diff_per_batch,
num_present_per_batch)
sum_diff = math_ops.reduce_sum(diffs, reduction_indices=reduction_indices)
term2 = 2.0 * _safe_div(math_ops.square(sum_diff),
math_ops.square(num_present_per_batch))
loss = _scale_losses(term1 - term2, weights)
mean_loss = array_ops.where(math_ops.reduce_sum(num_present_per_batch) > 0,
loss,
array_ops.zeros_like(loss),
name="value")
add_loss(mean_loss)
return mean_loss
@deprecated("2016-12-30", "Use tf.losses.cosine_distance instead.")
def cosine_distance(
predictions, labels=None, dim=None, weights=1.0, scope=None):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
predictions: An arbitrary matrix.
labels: A `Tensor` whose shape matches 'predictions'
dim: The dimension along which the cosine distance is computed.
weights: Coefficients for the loss a scalar, a tensor of shape
[batch_size] or a tensor whose shape matches `predictions`.
scope: The scope for the operations performed in computing the loss.
Returns:
A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`weights` is `None`.
"""
if dim is None:
raise ValueError("`dim` cannot be None.")
with ops.name_scope(scope, "cosine_distance_loss",
[predictions, labels, weights]) as scope:
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
predictions = math_ops.to_float(predictions)
labels = math_ops.to_float(labels)
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(radial_diffs, reduction_indices=[dim,])
return compute_weighted_loss(losses, weights, scope=scope)
# License: Apache-2.0
'''
Button Behavior
===============
The :class:`~kivy.uix.behaviors.button.ButtonBehavior`
`mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
:class:`~kivy.uix.button.Button` behavior. You can combine this class with
other widgets, such as an :class:`~kivy.uix.image.Image`, to provide
alternative buttons that preserve Kivy button behavior.
For an overview of behaviors, please refer to the :mod:`~kivy.uix.behaviors`
documentation.
Example
-------
The following example adds button behavior to an image to make a checkbox that
behaves like a button::
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.behaviors import ButtonBehavior
class MyButton(ButtonBehavior, Image):
def __init__(self, **kwargs):
super(MyButton, self).__init__(**kwargs)
self.source = 'atlas://data/images/defaulttheme/checkbox_off'
def on_press(self):
self.source = 'atlas://data/images/defaulttheme/checkbox_on'
def on_release(self):
self.source = 'atlas://data/images/defaulttheme/checkbox_off'
class SampleApp(App):
def build(self):
return MyButton()
SampleApp().run()
See :class:`~kivy.uix.behaviors.ButtonBehavior` for details.
'''
__all__ = ('ButtonBehavior', )
from kivy.clock import Clock
from kivy.config import Config
from kivy.properties import OptionProperty, ObjectProperty, \
BooleanProperty, NumericProperty
from time import time
class ButtonBehavior(object):
    '''
    This `mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides
    :class:`~kivy.uix.button.Button` behavior. Please see the
    :mod:`button behaviors module <kivy.uix.behaviors.button>` documentation
    for more information.
    :Events:
        `on_press`
            Fired when the button is pressed.
        `on_release`
            Fired when the button is released (i.e. the touch/click that
            pressed the button goes away).
    '''
    state = OptionProperty('normal', options=('normal', 'down'))
    '''The state of the button, must be one of 'normal' or 'down'.
    The state is 'down' only when the button is currently touched/clicked,
    otherwise its 'normal'.
    :attr:`state` is an :class:`~kivy.properties.OptionProperty` and defaults
    to 'normal'.
    '''
    last_touch = ObjectProperty(None)
    '''Contains the last relevant touch received by the Button. This can
    be used in `on_press` or `on_release` in order to know which touch
    dispatched the event.
    .. versionadded:: 1.8.0
    :attr:`last_touch` is a :class:`~kivy.properties.ObjectProperty` and
    defaults to `None`.
    '''
    min_state_time = NumericProperty(0)
    '''The minimum period of time which the widget must remain in the
    `'down'` state.
    .. versionadded:: 1.9.1
    :attr:`min_state_time` is a float and defaults to 0.035. This value is
    taken from :class:`~kivy.config.Config`.
    '''
    always_release = BooleanProperty(False)
    '''This determines whether or not the widget fires an `on_release` event if
    the touch_up is outside the widget.
    .. versionadded:: 1.9.0
    .. versionchanged:: 1.10.0
        The default value is now False.
    :attr:`always_release` is a :class:`~kivy.properties.BooleanProperty` and
    defaults to `False`.
    '''
    def __init__(self, **kwargs):
        # Register the custom events before the superclass processes kwargs,
        # so bindings passed in kwargs can target on_press/on_release.
        self.register_event_type('on_press')
        self.register_event_type('on_release')
        if 'min_state_time' not in kwargs:
            # Fall back to the global [graphics] min_state_time config value
            # when the caller did not supply one explicitly.
            self.min_state_time = float(Config.get('graphics',
                                                   'min_state_time'))
        super(ButtonBehavior, self).__init__(**kwargs)
        # __state_event holds a pending Clock event that will flip the state
        # back to 'normal'; __touch_time records when the press started.
        self.__state_event = None
        self.__touch_time = None
        # Any state change cancels a pending delayed release.
        self.fbind('state', self.cancel_event)
    def _do_press(self):
        # Internal helper: enter the pressed visual/logic state.
        self.state = 'down'
    def _do_release(self, *args):
        # Internal helper: leave the pressed state. *args absorbs the dt
        # argument when called from the Clock.
        self.state = 'normal'
    def cancel_event(self, *args):
        # Cancel a pending delayed release scheduled in on_touch_up.
        if self.__state_event:
            self.__state_event.cancel()
            self.__state_event = None
    def on_touch_down(self, touch):
        # Give children the first chance to consume the touch.
        if super(ButtonBehavior, self).on_touch_down(touch):
            return True
        if touch.is_mouse_scrolling:
            # Scroll-wheel events must not press the button.
            return False
        if not self.collide_point(touch.x, touch.y):
            return False
        if self in touch.ud:
            # This touch already pressed us once; ignore duplicates.
            return False
        # Grab the touch so subsequent move/up events are routed back to
        # this widget even if the touch leaves our bounding box.
        touch.grab(self)
        touch.ud[self] = True
        self.last_touch = touch
        self.__touch_time = time()
        self._do_press()
        self.dispatch('on_press')
        return True
    def on_touch_move(self, touch):
        # A grabbed touch moving is still ours; otherwise defer to children,
        # and finally claim touches we have already marked in touch.ud.
        if touch.grab_current is self:
            return True
        if super(ButtonBehavior, self).on_touch_move(touch):
            return True
        return self in touch.ud
    def on_touch_up(self, touch):
        # Only handle the grabbed copy of the touch we claimed on touch_down.
        if touch.grab_current is not self:
            return super(ButtonBehavior, self).on_touch_up(touch)
        assert(self in touch.ud)
        touch.ungrab(self)
        self.last_touch = touch
        if (not self.always_release and
                not self.collide_point(*touch.pos)):
            # Released outside the widget: reset the state but do not fire
            # on_release (unless always_release is set).
            self._do_release()
            return
        touchtime = time() - self.__touch_time
        if touchtime < self.min_state_time:
            # Keep the 'down' state visible for at least min_state_time by
            # deferring the state reset via the Clock.
            self.__state_event = Clock.schedule_once(
                self._do_release, self.min_state_time - touchtime)
        else:
            self._do_release()
        self.dispatch('on_release')
        return True
    def on_press(self):
        # Default event handler; subclasses override this hook.
        pass
    def on_release(self):
        # Default event handler; subclasses override this hook.
        pass
    def trigger_action(self, duration=0.1):
        '''Trigger whatever action(s) have been bound to the button by calling
        both the on_press and on_release callbacks.
        This simulates a quick button press without using any touch events.
        Duration is the length of the press in seconds. Pass 0 if you want
        the action to happen instantly.
        .. versionadded:: 1.8.0
        '''
        self._do_press()
        self.dispatch('on_press')
        def trigger_release(dt):
            # dt is the Clock callback argument; unused here.
            self._do_release()
            self.dispatch('on_release')
        if not duration:
            # Zero duration: release synchronously instead of scheduling.
            trigger_release(0)
        else:
            Clock.schedule_once(trigger_release, duration)
# License: MIT
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The cells extension."""
from oslo.config import cfg
from oslo import messaging
import six
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import rpc
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
ALIAS = "os-cells"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _filter_keys(item, keys):
"""Filters all model attributes except for keys
item is a dict
"""
return dict((k, v) for k, v in item.iteritems() if k in keys)
def _fixup_cell_info(cell_info, keys):
    """If the transport_url is present in the cell, derive username,
    rpc_host, and rpc_port from it.

    Mutates `cell_info` in place: 'transport_url' is popped and, for each
    name in `keys` not already set, a value is filled in from the URL's
    first host entry.
    """
    if 'transport_url' not in cell_info:
        return
    # Disassemble the transport URL
    transport_url = cell_info.pop('transport_url')
    try:
        transport_url = rpc.get_transport_url(transport_url)
    except messaging.InvalidTransportURL:
        # Just go with None's
        for key in keys:
            cell_info.setdefault(key, None)
        return
    if not transport_url.hosts:
        # NOTE(review): unlike the invalid-URL branch above, this path leaves
        # the keys absent instead of defaulting them to None -- confirm that
        # callers tolerate missing keys here.
        return
    transport_host = transport_url.hosts[0]
    # Map output key names to the differently-named TransportHost attributes;
    # keys not in the map (e.g. 'username') share the attribute name.
    transport_field_map = {'rpc_host': 'hostname', 'rpc_port': 'port'}
    for key in keys:
        if key in cell_info:
            # Do not clobber values that were already present.
            continue
        transport_field = transport_field_map.get(key, key)
        cell_info[key] = getattr(transport_host, transport_field)
def _scrub_cell(cell, detail=False):
    """Project a cell record onto the API-visible fields.

    Keeps only the whitelisted keys (plus 'capabilities' when `detail` is
    set), derives RPC connection fields from the transport URL, and tags the
    result with a 'parent'/'child' type.
    """
    wanted = ['name', 'username', 'rpc_host', 'rpc_port']
    if detail:
        wanted = wanted + ['capabilities']
    scrubbed = _filter_keys(cell, wanted + ['transport_url'])
    _fixup_cell_info(scrubbed, wanted)
    if cell['is_parent']:
        scrubbed['type'] = 'parent'
    else:
        scrubbed['type'] = 'child'
    return scrubbed
class CellsController(object):
    """Controller for Cell resources."""
    def __init__(self):
        # RPC clients used to talk to the compute and cells services.
        self.compute_api = compute.API()
        self.cells_rpcapi = cells_rpcapi.CellsAPI()
    def _get_cells(self, ctxt, req, detail=False):
        """Return all cells."""
        # Ask the CellsManager for the most recent data
        items = self.cells_rpcapi.get_cell_info_for_neighbors(ctxt)
        # Apply the standard pagination limits from the request, then strip
        # each record down to the API-visible fields.
        items = common.limited(items, req)
        items = [_scrub_cell(item, detail=detail) for item in items]
        return dict(cells=items)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def index(self, req):
        """Return all cells in brief."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def detail(self, req):
        """Return all cells in detail."""
        ctxt = req.environ['nova.context']
        authorize(ctxt)
        return self._get_cells(ctxt, req, detail=True)
    @extensions.expected_errors(501)
    @common.check_cells_enabled
    def info(self, req):
        """Return name and capabilities for this cell."""
        context = req.environ['nova.context']
        authorize(context)
        cell_capabs = {}
        # Capabilities are configured as a list of 'key=value' strings.
        my_caps = CONF.cells.capabilities
        for cap in my_caps:
            # NOTE(review): a capability value containing '=' would make this
            # unpack raise ValueError; cap.split('=', 1) may be intended --
            # confirm against the config format.
            key, value = cap.split('=')
            cell_capabs[key] = value
        cell = {'name': CONF.cells.name,
                'type': 'self',
                'rpc_host': None,
                'rpc_port': 0,
                'username': None,
                'capabilities': cell_capabs}
        return dict(cell=cell)
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def capacities(self, req, id=None):
        """Return capacities for a given cell or all cells."""
        # TODO(kaushikc): return capacities as a part of cell info and
        # cells detail calls in v3, along with capabilities
        context = req.environ['nova.context']
        authorize(context)
        try:
            # cell_name=None means "all cells".
            capacities = self.cells_rpcapi.get_capacities(context,
                                                          cell_name=id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell={"capacities": capacities})
    @extensions.expected_errors((404, 501))
    @common.check_cells_enabled
    def show(self, req, id):
        """Return data about the given cell name. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            cell = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((403, 404, 501))
    @common.check_cells_enabled
    @wsgi.response(204)
    def delete(self, req, id):
        """Delete a child or parent cell entry. 'id' is a cell name."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            num_deleted = self.cells_rpcapi.cell_delete(context, id)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        if num_deleted == 0:
            # Nothing matched the given name: report 404 rather than 204.
            raise exc.HTTPNotFound(
                explanation=_("Cell %s doesn't exist.") % id)
    def _validate_cell_name(self, cell_name):
        """Validate cell name is not empty and doesn't contain '!' or '.'."""
        # '!' and '.' are rejected because they act as separators in cell
        # routing paths; raises HTTPBadRequest on failure.
        if not cell_name:
            msg = _("Cell name cannot be empty")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        if '!' in cell_name or '.' in cell_name:
            msg = _("Cell name cannot contain '!' or '.'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
    def _validate_cell_type(self, cell_type):
        """Validate cell_type is 'parent' or 'child'."""
        if cell_type not in ['parent', 'child']:
            msg = _("Cell type must be 'parent' or 'child'")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
    def _normalize_cell(self, cell, existing=None):
        """Normalize input cell data. Normalizations include:
        * Converting cell['type'] to is_parent boolean.
        * Merging existing transport URL with transport information.

        Mutates `cell` in place; `existing` is the current DB record (if
        any) whose transport URL provides defaults for fields the request
        did not supply.
        """
        # Start with the cell type conversion
        if 'type' in cell:
            self._validate_cell_type(cell['type'])
            cell['is_parent'] = cell['type'] == 'parent'
            del cell['type']
        # Avoid cell type being overwritten to 'child'
        elif existing:
            cell['is_parent'] = existing['is_parent']
        else:
            cell['is_parent'] = False
        # Now we disassemble the existing transport URL...
        # NOTE(review): rpc.get_transport_url presumably returns an
        # empty/default TransportURL when given None -- confirm.
        transport_url = existing.get('transport_url') if existing else None
        transport_url = rpc.get_transport_url(transport_url)
        if 'rpc_virtual_host' in cell:
            transport_url.virtual_host = cell.pop('rpc_virtual_host')
        if not transport_url.hosts:
            transport_url.hosts.append(messaging.TransportHost())
        transport_host = transport_url.hosts[0]
        # Copy over the input fields
        transport_field_map = {
            'username': 'username',
            'password': 'password',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
        }
        for key, input_field in transport_field_map.items():
            # Only override the value if we're given an override
            if input_field in cell:
                setattr(transport_host, key, cell.pop(input_field))
        # Now set the transport URL
        cell['transport_url'] = str(transport_url)
    @extensions.expected_errors((400, 403, 501))
    @common.check_cells_enabled
    @wsgi.response(201)
    def create(self, req, body):
        """Create a child cell entry."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        if 'name' not in cell:
            msg = _("No cell name in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        self._validate_cell_name(cell['name'])
        self._normalize_cell(cell)
        try:
            cell = self.cells_rpcapi.cell_create(context, cell)
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((400, 403, 404, 501))
    @common.check_cells_enabled
    def update(self, req, id, body):
        """Update a child cell entry. 'id' is the cell name to update."""
        context = req.environ['nova.context']
        authorize(context)
        if 'cell' not in body:
            msg = _("No cell information in request")
            LOG.error(msg)
            raise exc.HTTPBadRequest(explanation=msg)
        cell = body['cell']
        # The 'id' field is server-assigned; silently drop it from input.
        cell.pop('id', None)
        if 'name' in cell:
            self._validate_cell_name(cell['name'])
        try:
            # NOTE(Vek): There is a race condition here if multiple
            #            callers are trying to update the cell
            #            information simultaneously. Since this
            #            operation is administrative in nature, and
            #            will be going away in the future, I don't see
            #            it as much of a problem...
            existing = self.cells_rpcapi.cell_get(context, id)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        self._normalize_cell(cell, existing)
        try:
            cell = self.cells_rpcapi.cell_update(context, id, cell)
        except exception.CellNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.CellsUpdateUnsupported as e:
            raise exc.HTTPForbidden(explanation=e.format_message())
        return dict(cell=_scrub_cell(cell))
    @extensions.expected_errors((400, 501))
    @common.check_cells_enabled
    @wsgi.response(204)
    def sync_instances(self, req, body):
        """Tell all cells to sync instance info."""
        context = req.environ['nova.context']
        authorize(context)
        # Pop the recognized keys; anything left over is a client error.
        project_id = body.pop('project_id', None)
        deleted = body.pop('deleted', False)
        updated_since = body.pop('updated_since', None)
        if body:
            msg = _("Only 'updated_since', 'project_id' and 'deleted' are "
                    "understood.")
            raise exc.HTTPBadRequest(explanation=msg)
        if isinstance(deleted, six.string_types):
            try:
                # strict=True rejects anything that is not a recognized
                # boolean string ('true', '1', 'yes', ...).
                deleted = strutils.bool_from_string(deleted, strict=True)
            except ValueError as err:
                raise exc.HTTPBadRequest(explanation=str(err))
        if updated_since:
            try:
                # Validate only; the original string is forwarded as-is.
                timeutils.parse_isotime(updated_since)
            except ValueError:
                msg = _('Invalid changes-since value')
                raise exc.HTTPBadRequest(explanation=msg)
        self.cells_rpcapi.sync_instances(context, project_id=project_id,
                updated_since=updated_since, deleted=deleted)
class Cells(extensions.V3APIExtensionBase):
"""Enables cells-related functionality such as adding neighbor cells,
listing neighbor cells, and getting the capabilities of the local cell.
"""
name = "Cells"
alias = ALIAS
version = 1
def get_resources(self):
coll_actions = {
'detail': 'GET',
'info': 'GET',
'sync_instances': 'POST',
'capacities': 'GET',
}
memb_actions = {
'capacities': 'GET',
}
res = extensions.ResourceExtension(ALIAS, CellsController(),
collection_actions=coll_actions,
member_actions=memb_actions)
return [res]
def get_controller_extensions(self):
return []
| apache-2.0 | -5,647,026,097,581,548,000 | 35.76584 | 78 | 0.601978 | false | 4.147296 | false | false | false | 0.000075 |
mengxn/tensorflow | tensorflow/contrib/slim/python/slim/nets/inception_v3.py | 89 | 30528 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v3 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v3_base(inputs,
final_endpoint='Mixed_7c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception model from http://arxiv.org/abs/1512.00567.
Constructs an Inception v3 network from inputs to the given final endpoint.
This method can construct the network up to the final inception block
Mixed_7c.
Note that the names of the layers in the paper do not correspond to the names
of the endpoints registered by this function although they build the same
network.
Here is a mapping from the old_names to the new names:
Old name | New name
=======================================
conv0 | Conv2d_1a_3x3
conv1 | Conv2d_2a_3x3
conv2 | Conv2d_2b_3x3
pool1 | MaxPool_3a_3x3
conv3 | Conv2d_3b_1x1
conv4 | Conv2d_4a_3x3
pool2 | MaxPool_5a_3x3
mixed_35x35x256a | Mixed_5b
mixed_35x35x288a | Mixed_5c
mixed_35x35x288b | Mixed_5d
mixed_17x17x768a | Mixed_6a
mixed_17x17x768b | Mixed_6b
mixed_17x17x768c | Mixed_6c
mixed_17x17x768d | Mixed_6d
mixed_17x17x768e | Mixed_6e
mixed_8x8x1280a | Mixed_7a
mixed_8x8x2048a | Mixed_7b
mixed_8x8x2048b | Mixed_7c
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(scope, 'InceptionV3', [inputs]):
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='VALID'):
# 299 x 299 x 3
end_point = 'Conv2d_1a_3x3'
net = layers.conv2d(inputs, depth(32), [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 149 x 149 x 32
end_point = 'Conv2d_2a_3x3'
net = layers.conv2d(net, depth(32), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 147 x 147 x 32
end_point = 'Conv2d_2b_3x3'
net = layers.conv2d(
net, depth(64), [3, 3], padding='SAME', scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 147 x 147 x 64
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 73 x 73 x 64
end_point = 'Conv2d_3b_1x1'
net = layers.conv2d(net, depth(80), [1, 1], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 73 x 73 x 80.
end_point = 'Conv2d_4a_3x3'
net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 71 x 71 x 192.
end_point = 'MaxPool_5a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 35 x 35 x 192.
# Inception blocks
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='SAME'):
# mixed: 35 x 35 x 256.
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(32), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_1: 35 x 35 x 288.
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0b_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv_1_0c_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_2: 35 x 35 x 288.
end_point = 'Mixed_5d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(48), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [5, 5], scope='Conv2d_0b_5x5')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(64), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_3: 17 x 17 x 768.
end_point = 'Mixed_6a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(384), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1,
depth(96), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_1x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed4: 17 x 17 x 768.
end_point = 'Mixed_6b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(128), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(128), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(128), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_5: 17 x 17 x 768.
end_point = 'Mixed_6c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_6: 17 x 17 x 768.
end_point = 'Mixed_6d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(160), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_7: 17 x 17 x 768.
end_point = 'Mixed_6e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0b_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0c_1x7')
branch_2 = layers.conv2d(
branch_2, depth(192), [7, 1], scope='Conv2d_0d_7x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [1, 7], scope='Conv2d_0e_1x7')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_8: 8 x 8 x 1280.
end_point = 'Mixed_7a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0,
depth(320), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [1, 7], scope='Conv2d_0b_1x7')
branch_1 = layers.conv2d(
branch_1, depth(192), [7, 1], scope='Conv2d_0c_7x1')
branch_1 = layers.conv2d(
branch_1,
depth(192), [3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_9: 8 x 8 x 2048.
end_point = 'Mixed_7b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
layers.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0b_3x1')
],
3)
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
layers.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# mixed_10: 8 x 8 x 2048.
end_point = 'Mixed_7c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(320), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net, depth(384), [1, 1], scope='Conv2d_0a_1x1')
branch_1 = array_ops.concat(
[
layers.conv2d(
branch_1, depth(384), [1, 3], scope='Conv2d_0b_1x3'),
layers.conv2d(
branch_1, depth(384), [3, 1], scope='Conv2d_0c_3x1')
],
3)
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net, depth(448), [1, 1], scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(384), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = array_ops.concat(
[
layers.conv2d(
branch_2, depth(384), [1, 3], scope='Conv2d_0c_1x3'),
layers.conv2d(
branch_2, depth(384), [3, 1], scope='Conv2d_0d_3x1')
],
3)
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3, depth(192), [1, 1], scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v3(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV3'):
"""Inception model from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna.
With the default arguments this method constructs the exact model defined in
the paper. However, one can experiment with variations of the inception_v3
network by changing arguments dropout_keep_prob, min_depth and
depth_multiplier.
The default image size used to train this network is 299x299.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if 'depth_multiplier' is less than or equal to zero.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(
scope, 'InceptionV3', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v3_base(
inputs,
scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
# Auxiliary Head logits
with arg_scope(
[layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d],
stride=1,
padding='SAME'):
aux_logits = end_points['Mixed_6e']
with variable_scope.variable_scope('AuxLogits'):
aux_logits = layers_lib.avg_pool2d(
aux_logits, [5, 5],
stride=3,
padding='VALID',
scope='AvgPool_1a_5x5')
aux_logits = layers.conv2d(
aux_logits, depth(128), [1, 1], scope='Conv2d_1b_1x1')
# Shape of feature map before the final layer.
kernel_size = _reduced_kernel_size_for_small_input(aux_logits, [5, 5])
aux_logits = layers.conv2d(
aux_logits,
depth(768),
kernel_size,
weights_initializer=trunc_normal(0.01),
padding='VALID',
scope='Conv2d_2a_{}x{}'.format(*kernel_size))
aux_logits = layers.conv2d(
aux_logits,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
weights_initializer=trunc_normal(0.001),
scope='Conv2d_2b_1x1')
if spatial_squeeze:
aux_logits = array_ops.squeeze(
aux_logits, [1, 2], name='SpatialSqueeze')
end_points['AuxLogits'] = aux_logits
# Final pooling and prediction
with variable_scope.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [8, 8])
net = layers_lib.avg_pool2d(
net,
kernel_size,
padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 2048
net = layers_lib.dropout(
net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
end_points['PreLogits'] = net
# 2048
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
# 1000
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v3.default_image_size = 299
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are is large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.tf.contrib.slim.ops._two_element_tuple
cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [
min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
]
return kernel_size_out
def inception_v3_arg_scope(weight_decay=0.00004,
stddev=0.1,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV3 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
stddev: The standard deviation of the trunctated normal weight initializer.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=init_ops.truncated_normal_initializer(
stddev=stddev),
activation_fn=nn_ops.relu,
normalizer_fn=layers_lib.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
| apache-2.0 | -3,537,503,690,995,678,000 | 41.518106 | 80 | 0.579468 | false | 3.272727 | false | false | false | 0.007927 |
msabramo/hmac_cli | setup.py | 1 | 1142 | import os
from setuptools import setup
this_dir = os.path.dirname(__file__)
long_description = "\n" + open(os.path.join(this_dir, 'README.rst')).read()
setup(
name='hmac_cli',
version='0.0.0',
description='Simple CLI for encrypting data with a private key, using HMAC',
long_description=long_description,
keywords='hmac',
author='Marc Abramowitz',
author_email='msabramo@gmail.com',
url='https://github.com/msabramo/hmac_cli',
py_modules=['hmac_cli'],
zip_safe=False,
install_requires=['click'],
entry_points="""\
[console_scripts]
hmac = hmac_cli:cli
""",
license='MIT',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Testing',
'Natural Language :: English',
'Intended Audience :: Developers',
],
)
| mit | -2,956,125,209,240,453,000 | 30.722222 | 80 | 0.603327 | false | 3.845118 | false | false | false | 0.000876 |
hickerson/bbn | fable/fable_sources/libtbx/utils.py | 1 | 43577 | from __future__ import division
from libtbx.queuing_system_utils import sge_utils, pbs_utils
from libtbx.str_utils import show_string
try: import gzip
except ImportError: gzip = None
try: import bz2
except ImportError: bz2 = None
try:
import hashlib
hashlib_md5 = hashlib.md5
except ImportError:
import md5
hashlib_md5 = md5.new
from stdlib import math
import warnings
import shutil
import glob
import time
import atexit
import traceback
import re
import sys, os
op = os.path  # short alias for os.path, used throughout this module

# Reserved device file names on MS Windows; paths whose basename matches one
# of these cannot be used as ordinary files on that platform.
windows_device_names = """\
CON PRN AUX NUL COM1 COM2 COM3 COM4 COM5 COM6 COM7 COM8 COM9
LPT1 LPT2 LPT3 LPT4 LPT5 LPT6 LPT7 LPT8 LPT9""".split()
def xfrange(start, stop=None, step=None, tolerance=None):
  """A float range generator (float analogue of xrange).

  With a single argument, yields 0.0, step, ... up to (but excluding) start.
  If tolerance is given, the end point is included when it lies within
  abs(step*tolerance) of the last generated value plus one step.
  """
  if (stop is None):
    # single-argument form: xfrange(stop)
    start, stop = 0.0, start + 0.0
  else:
    start = start + 0.0  # force float arithmetic
  if (step is None):
    step = 1.0
  else:
    assert step != 0.0
  n = int(math.ceil((stop - start) / step))
  if (    tolerance is not None
      and abs(start + n * step - stop) < abs(step * tolerance)):
    # end point is (numerically) on the grid: include it
    n += 1
  i = 0
  while (i < n):
    yield start + i * step
    i += 1
def frange(start, stop=None, step=None):
  """List version of xfrange (float analogue of range)."""
  return [value for value in xfrange(start, stop=stop, step=step)]
def xsamples(start, stop=None, step=None, tolerance=1e-6):
  """Like xfrange, but includes the end point by default (tolerance=1e-6)."""
  return xfrange(start, stop=stop, step=step, tolerance=tolerance)
def samples(start, stop=None, step=None, tolerance=1e-6):
  """List version of xsamples (end point included by default)."""
  return [value for value in xsamples(start, stop, step, tolerance)]
def escape_sh_double_quoted(s):
  """Escape backslashes and double quotes in s.

  The result is supposed to be double-quoted when passed to sh.
  None is passed through unchanged.
  """
  if (s is None):
    return None
  escaped = s.replace('\\', '\\\\')
  return escaped.replace('"', '\\"')
def xlen(seq):
  """len() variant that maps None to None instead of raising."""
  return None if seq is None else len(seq)
def product(seq):
  """Product of the elements of seq; None for an empty sequence.

  The first element seeds the accumulator, so the result has the type
  produced by repeated in-place multiplication of the elements.
  """
  accumulator = None
  for item in seq:
    if (accumulator is None):
      accumulator = item
    else:
      accumulator *= item
  return accumulator
def sequence_index_dict(seq, must_be_unique=True):
  """Map each element of seq to its index.

  With must_be_unique (the default) an AssertionError is raised on a
  duplicate element; otherwise the last occurrence wins.
  """
  lookup = {}
  for index, item in enumerate(seq):
    if (must_be_unique):
      assert item not in lookup
    lookup[item] = index
  return lookup
def number_from_string(string):
  """Convert string to int if possible, else evaluate it as a numeric
  expression with the math module's names in scope (e.g. "2*pi").

  Raises ValueError for "true"/"false" (to avoid silently treating
  booleans as numbers) and for anything that does not evaluate.
  """
  # similar to libtbx.phil.number_from_value_string
  # (please review if making changes here)
  if (string.lower() in ["true", "false"]):
    raise ValueError(
      'Error interpreting "%s" as a numeric expression.' % string)
  try:
    return int(string)
  except KeyboardInterrupt:
    raise
  except Exception:
    pass  # fall through to full expression evaluation
  try:
    return eval(string, math.__dict__, {})
  except KeyboardInterrupt:
    raise
  except Exception:
    raise ValueError(
      'Error interpreting "%s" as a numeric expression: %s' % (
        string, format_exception()))
def gzip_open(file_name, mode):
  """gzip.open() wrapper with a clear error if the gzip module is absent.

  mode must be one of r, rb, w, wb, a, ab.
  """
  assert mode in ["r", "rb", "w", "wb", "a", "ab"]
  if (gzip is None):
    prefix = ""
    if (mode[0] == "r"): prefix = "un"
    raise RuntimeError(
      "gzip module not available: cannot %scompress file %s"
      % (prefix, show_string(file_name)))
  return gzip.open(file_name, mode)
def bz2_open(file_name, mode):
  """bz2.BZ2File() wrapper with a clear error if the bz2 module is absent.

  mode must be 'r' or 'w'.
  """
  assert mode in ('r', 'w')
  if bz2 is None:
    # Fixed format string: the original used "%compress" (a %c conversion),
    # which cannot format the two-character "un" prefix and itself raised
    # when this error path was taken.  %s is what was intended.
    raise RuntimeError('bz2 module not available: cannot %scompress file %s'
      % ({'r':'un', 'w':''}[mode], file_name))
  return bz2.BZ2File(file_name, mode)
def warn_if_unexpected_md5_hexdigest(
      path,
      expected_md5_hexdigests,
      hints=[],
      out=None):
  """Warn (on out, default sys.stdout) if the md5 hexdigest of the content
  of path is not among expected_md5_hexdigests.

  The content is normalized to "\n" line endings before hashing, so the
  digest is independent of the platform's line terminators.  hints lines,
  if given, are printed inside the warning box.  (hints is never mutated,
  so the mutable default is harmless here.)

  Returns False if the digest is expected, True after printing the warning.
  """
  m = hashlib_md5()
  m.update("\n".join(open(path).read().splitlines()))
  # (removed a dead local that stored m.hexdigest() but was never read)
  if (m.hexdigest() in expected_md5_hexdigests): return False
  warning = "Warning: unexpected md5 hexdigest:"
  file_name = "  File: %s" % show_string(path)
  new_hexdigest = "  New md5 hexdigest: %s" % m.hexdigest()
  width = max([len(s) for s in [warning, file_name, new_hexdigest]])
  if (out is None): out = sys.stdout
  print >> out, "*"*width
  print >> out, warning
  print >> out, file_name
  print >> out, new_hexdigest
  for hint in hints:
    print >> out, hint
  print >> out, "*"*width
  return True
def get_memory_from_string(mem_str):
  """Convert a memory-size string such as "2K", "512MB" or "1024" to bytes.

  int and float inputs are returned unchanged; "" returns 0.  Binary
  (1024-based) suffixes K, M, G, T, E, Z, Y are recognized, optionally
  followed by "B", case-insensitively and ignoring embedded spaces.
  A bare number is returned as-is (factor 1).

  Raises RuntimeError for unparsable input.

  Cleanups vs. the original: the Py2-only "except ValueError, e:" clauses
  bound an exception object that was never used, and the enumerate() index
  was unused; both removed (behavior unchanged).
  """
  if type(mem_str)==type(1): return mem_str
  if type(mem_str)==type(1.): return mem_str
  mem_str = mem_str.replace(" ","").strip().upper()
  if mem_str == "": return 0
  factor=1024
  for greek in ["K","M","G","T","E","Z","Y"]:
    num_str=None
    if mem_str[-1]==greek:
      num_str = mem_str[:-1]
    if mem_str.find("%sB" % greek)==len(mem_str)-2:
      # two-letter suffix, e.g. "KB"
      num_str = mem_str[:-2]
    if num_str is not None:
      try:
        num = float(num_str)
      except ValueError:
        raise RuntimeError("""
The numerical portion of %s is not a valid float
""" % mem_str)
      break
    factor*=1024
  else:
    # no suffix matched: plain number, no unit scaling
    try:
      num = int(mem_str)
    except ValueError:
      raise RuntimeError("""
There is no memory unit or valid float in %s
""" % mem_str)
    factor=1
  return num*factor
def getenv_bool(variable_name, default=False):
  """Interpret an environment variable as a boolean.

  Accepts "true"/"false"/"1"/"0" (case-insensitive).  Returns default when
  the variable is unset; raises Sorry on any other value.
  """
  raw = os.environ.get(variable_name)
  if raw is None:
    return default
  lowered = raw.lower()
  if lowered in ("true", "1"):
    return True
  if lowered in ("false", "0"):
    return False
  raise Sorry(
    'Environment variable %s must be "True", "False", "0", or "1"'
    ' (current value: "%s").' % (variable_name, raw))
def file_size(file_name):
  """Return the size of file_name in bytes (via os.stat)."""
  stat_result = os.stat(file_name)
  return stat_result.st_size
def copy_file(source, target, compress=None):
  """Copy source to target, optionally gzip-compressing (compress=".gz").

  If target is a directory the source base name is appended to it.
  """
  assert op.isfile(source)
  if op.isdir(target):
    target = op.join(target, op.basename(source))
  if compress is None:
    dest = open(target, "wb")
  else:
    assert compress == ".gz"
    dest = gzip_open(file_name=target+compress, mode="wb")
  dest.write(open(source, "rb").read())
  del dest  # drop the handle so the output is flushed and closed
def remove_files(pattern=None, paths=None, ensure_success=True):
  """Delete the given paths (or every glob match of pattern).

  Exactly one of pattern/paths must be supplied.  With ensure_success=True
  a RuntimeError is raised when a path still exists after removal; with
  ensure_success=False only regular files are removed, best-effort.
  """
  assert [pattern, paths].count(None) == 1
  file_names = glob.glob(pattern) if paths is None else paths
  for file_name in file_names:
    if ensure_success:
      if op.exists(file_name):
        os.remove(file_name)
      if op.exists(file_name):
        raise RuntimeError("Cannot remove file: %s" % show_string(file_name))
    else:
      if op.isfile(file_name):
        os.remove(file_name)
def find_files (dir_name, pattern="*", files_only=True) :
  """Return full paths of entries in dir_name whose names match pattern.

  pattern is a regular expression (re.search), NOT a shell glob — note the
  default "*" is not a valid regex and only works if overridden.
  With files_only=True, directories are skipped.
  """
  assert os.path.isdir(dir_name) and (pattern is not None)
  regex = re.compile(pattern)
  matches = []
  for entry in os.listdir(dir_name):
    full_path = os.path.join(dir_name, entry)
    if files_only and not os.path.isfile(full_path):
      continue
    if regex.search(entry) is not None:
      matches.append(full_path)
  return matches
def sort_files_by_mtime (file_names=None, dir_name=None, reverse=False) :
  """Return file names sorted by modification time, oldest first.

  Exactly one of file_names/dir_name must be given; with dir_name, all
  entries of that directory are used.  reverse=True yields newest first.
  Raises OSError if a file disappears between listing and stat.
  """
  assert ([file_names, dir_name].count(None) == 1)
  if (dir_name is not None) :
    assert os.path.isdir(dir_name)
    file_names = [ os.path.join(dir_name, fn) for fn in os.listdir(dir_name) ]
  # key-based sort: the previous cmp-function form (sort(lambda x,y:
  # cmp(...))) is Python-2-only and slower; key= is equivalent and portable.
  return sorted(file_names, key=os.path.getmtime, reverse=reverse)
def tupleize(x):
  """Return tuple(x); non-iterable values are wrapped as a 1-tuple (x,)."""
  try:
    return tuple(x)
  except KeyboardInterrupt:
    raise  # never swallow a user interrupt
  except Exception:
    return (x,)
def plural_s(n, suffix="s"):
  """Return (n, "") when n == 1, else (n, suffix) — for message formatting."""
  return (n, "") if n == 1 else (n, suffix)
def n_dim_index_from_one_dim(i1d, sizes):
  """Convert a flat (row-major) index i1d into a per-axis index list for
  the given shape sizes."""
  assert len(sizes) > 0
  indices = []
  remainder = i1d
  # peel off the fastest-varying (last) axis first
  for size in reversed(sizes):
    assert size > 0
    remainder, axis_index = divmod(remainder, size)
    indices.append(axis_index)
  indices.reverse()
  return indices
def flat_list(nested_list):
  """Recursively flatten nested sequences (anything with __len__) into a
  flat list; non-sequence values become single elements."""
  if not hasattr(nested_list, "__len__"):
    return [nested_list]
  result = []
  for element in nested_list:
    result.extend(flat_list(element))
  return result
def select_matching(key, choices, default=None):
  """Return the value of the first (pattern, value) pair whose regex
  pattern re.search-matches key; default when none match."""
  for pattern, value in choices:
    if re.search(pattern, key) is not None:
      return value
  return default
class Keep: pass  # marker class with no behavior; presumably a "keep existing value" sentinel — confirm at call sites
class Sorry(Exception):
  """
  Basic exception type for user errors; the traceback will be suppressed.
  """
  __orig_module__ = __module__
  # trick to get just "Sorry" instead of "libtbx.utils.Sorry"
  __module__ = Exception.__module__
  def reset_module (self) :
    # undo the display trick above, restoring the real module path
    self.__class__.__module__ = self.__class__.__orig_module__
# When set, Sorry tracebacks are shown in full (debugging aid).
disable_tracebacklimit = "LIBTBX_DISABLE_TRACEBACKLIMIT" in os.environ
__prev_excepthook = sys.excepthook
def sorry_excepthook(type, value, traceback):
  """Excepthook wrapper: suppress the traceback for Sorry exceptions by
  temporarily setting sys.tracebacklimit to 0, then restore prior state."""
  tb_off = (not disable_tracebacklimit and isinstance(value, Sorry))
  if (tb_off):
    # sentinel distinguishing "attribute absent" from "attribute set to None"
    class __not_set(object): pass
    prev_tracebacklimit = getattr(sys, "tracebacklimit", __not_set)
    sys.tracebacklimit = 0
  result = __prev_excepthook(type, value, traceback)
  if (tb_off):
    if (prev_tracebacklimit is __not_set):
      del sys.tracebacklimit
    else:
      sys.tracebacklimit = prev_tracebacklimit
  return result
# install the wrapper globally for the process
sys.excepthook = sorry_excepthook
class Usage(Sorry):
  """
  Subclass of Sorry, for printing out usage instructions upon program
  invocation without arguments (or --help, etc.).
  """
  # display as "Usage" rather than "libtbx.utils.Usage" (see Sorry)
  __module__ = Exception.__module__
class Abort(Sorry) :
  """
  Subclass of Sorry, primarily used in the Phenix GUI in response to user
  input.
  """
  # display as "Abort" rather than "libtbx.utils.Abort" (see Sorry)
  __module__ = Exception.__module__
class Failure(Sorry) :
  # Subclass of Sorry with no extra behavior; presumably raised for
  # failures elsewhere in the codebase — no usage visible in this file.
  __module__ = Exception.__module__
def detect_multiprocessing_problem():
  """Return a string describing why multiprocessing cannot be used here
  (old Python, OpenMP build, missing sem_open), or None if a small
  Pool smoke test succeeds."""
  vers_info = sys.version_info[:2]
  if (vers_info < (2,6)):
    return "multiprocessing module not available:" \
      " Python 2.6 or higher is required" \
      " (version currently in use: %d.%d)" % vers_info
  import libtbx.load_env
  if (libtbx.env.has_module("omptbx")) :
    import omptbx
    if (omptbx.omp_version is not None) :
      return "multiprocessing is not compatible with OpenMP"
  sem_open_msg = "This platform lacks a functioning sem_open implementation"
  pool = None
  try:
    try:
      # smoke test: a 2-process pool mapping a trivial builtin
      import multiprocessing
      pool = multiprocessing.Pool(processes=2)
      pool.map(func=abs, iterable=range(2), chunksize=1)
    except ImportError, e:
      # only the well-known sem_open failure is reported; anything else
      # is a genuine error and propagates
      if (not str(e).startswith(sem_open_msg)):
        raise
      return "multiprocessing import error: " + sem_open_msg
  finally:
    if (pool is not None):
      pool.close()
      pool.join()
  return None
def if_none(value, default):
  """Return default when value is None; otherwise return value unchanged."""
  return default if value is None else value
def format_exception():
  """Format the exception currently being handled as "TypeName: details".

  When the exception carries no message, falls back to the file name and
  line number of the innermost traceback frame.
  """
  exc_type, exc_value, exc_tb = sys.exc_info()
  detail = str(exc_value)
  if detail:
    detail = detail.replace(" (<string>, line ", " (line ")
  else:
    file_name, line = traceback.extract_tb(exc_tb, 1)[0][:2]
    if file_name is not None:
      detail = file_name + " "
    if line is not None:
      detail += "line %d" % line
  return ("%s: %s" % (exc_type.__name__, detail)).rstrip()
def show_exception_info_if_full_testing(prefix="EXCEPTION_INFO: "):
  """When full testing (or traceback-limit disabling) is active, write the
  current stack and exception, each line tagged with prefix, to every
  distinct standard stream; returns the message written (None otherwise)."""
  import libtbx.load_env
  if (    not libtbx.env.full_testing
      and not disable_tracebacklimit):
    return
  from libtbx import introspection
  from cStringIO import StringIO
  sio = StringIO()
  introspection.show_stack(out=sio)
  traceback.print_exc(file=sio)
  msg = "\n".join([prefix+line for line in sio.getvalue().splitlines()]) + "\n"
  del sio
  # write to each stream only once, even when sys.stdout is sys.__stdout__
  done = []
  for out in [sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__]:
    def is_done():
      for o in done:
        if (o is out): return True
      return False
    if (is_done()): continue
    out.write(msg)
    flush = getattr(out, "flush", None)
    if (flush is not None): flush()
    done.append(out)
  return msg
def base36_encode(integer, width=None):
  """Encode a non-negative integer in base 36 (digits "0"-"9","a"-"z").

  Optionally left-pad with "0" up to width characters.  Note that 0
  encodes as "" unless width is given.
  """
  alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
  encoded = ""
  value = integer
  while value != 0:
    value, digit = divmod(value, 36)
    encoded = alphabet[digit] + encoded
  if width is not None:
    encoded = encoded.rjust(width, "0")
  return encoded
def base36_timestamp(seconds_since_epoch=None, multiplier=1000, width=10):
  """Encode a timestamp (default: now) as a fixed-width base-36 string,
  after scaling by multiplier and rounding to the nearest integer."""
  seconds = seconds_since_epoch
  if seconds is None:
    seconds = time.time()
  return base36_encode(integer=int(seconds * multiplier + 0.5), width=width)
def date_and_time():
  """Return the current local date/time plus time-zone name, UTC offset
  and raw epoch seconds, e.g.
  "Date 2014-01-31 Time 12:34:56 PST -0800 (1391200496.00 s)"."""
  seconds_since_epoch = time.time()
  localtime = time.localtime(seconds_since_epoch)
  # localtime[8] is tm_isdst: pick the DST zone name/offset when active
  if (time.daylight and localtime[8] != 0):
    tzname = time.tzname[1]
    offs = -time.altzone
  else:
    tzname = time.tzname[0]
    offs = -time.timezone
  # NOTE(review): for negative offsets with non-zero minutes, the floor
  # divisions in "%+03d%02d" may misreport the minutes field — confirm if
  # any supported zone has a fractional-hour offset.
  return time.strftime("Date %Y-%m-%d Time %H:%M:%S", localtime) \
    + " %s %+03d%02d (%.2f s)" % (
      tzname, offs//3600, offs//60%60, seconds_since_epoch)
class timer_base(object):
  """Base class for simple timers; subclasses supply get() returning the
  current reading (CPU or wall-clock seconds)."""
  def __init__(self):
    # reference reading taken at construction
    self.t = self.get()
  def elapsed(self):
    # time since construction (or last delta()) without resetting
    t = self.get()
    d = t - self.t
    return d
  def delta(self):
    # time since the last delta()/construction, resetting the reference
    t = self.get()
    d = t - self.t
    self.t = t
    return d
  def show_elapsed(self, prefix="", out=None):
    if (out == None): out = sys.stdout
    print >> out, prefix+"%.2f s" % self.elapsed()
  def show_delta(self, prefix="", out=None):
    if (out == None): out = sys.stdout
    print >> out, prefix+"%.2f s" % self.delta()
class user_plus_sys_time(timer_base):
  """Timer measuring cumulative user + system CPU time (os.times)."""
  def get(self):
    times = os.times()
    return times[0] + times[1]
class wall_clock_time(timer_base):
  """Timer based on wall-clock time (time.time).

  Motivation: when running multithreaded code, user_plus_sys_time reports
  the cumulated times of all threads — useless for analysing scaling with
  thread count.  Wall-clock time, although less reliable, is the only
  option in that case.
  """
  def get(self):
    return time.time()
class time_log(object):
  """Accumulates timings for a labelled operation over repeated
  start()/stop() cycles and formats simple one-line reports."""
  legend = "time_log: label: n accumulation delta average"
  def __init__(self, label, use_wall_clock=False):
    self.label = label
    self.use_wall_clock = use_wall_clock
    self.accumulation = 0
    self.n = 0
    self.delta = 0
    self.timer = None
  def start(self):
    # pick the timer flavour lazily, so the flag can change between cycles
    timer_class = wall_clock_time if self.use_wall_clock else user_plus_sys_time
    self.timer = timer_class()
    return self
  def stop(self):
    self.delta = self.timer.delta()
    self.timer = None
    self.accumulation += self.delta
    self.n += 1
  def average(self):
    # max(1, n) avoids division by zero before the first stop()
    return self.accumulation / max(1,self.n)
  def log(self):
    self.stop()
    return self.report()
  def log_elapsed(self, local_label):
    return "time_log: %s: %.2f elapsed %s" % (
      self.label, self.timer.elapsed(), local_label)
  def report(self):
    assert self.timer is None
    return "time_log: %s: %d %.2f %.3g %.3g" % (
      self.label, self.n, self.accumulation,
      self.delta, self.average())
def human_readable_time(time_in_seconds):
  """Return (value, unit) scaling seconds up through minutes/hours/days.

  Thresholds: > 120 s -> minutes, > 120 min -> hours, > 48 h -> days.
  """
  value, unit = time_in_seconds, "seconds"
  if value > 120:
    value, unit = value / 60, "minutes"
    if value > 120:
      value, unit = value / 60, "hours"
      if value > 48:
        value, unit = value / 24, "days"
  return value, unit
def human_readable_time_as_seconds(time_units, time_unit):
  """Inverse of human_readable_time: convert (value, unit) back to seconds.

  Accepts numeric or string values; raises RuntimeError on unknown units.
  """
  value = float(time_units) if isinstance(time_units, str) else time_units
  factors = {"seconds": 1, "minutes": 60, "hours": 60*60, "days": 60*60*24}
  if time_unit in factors:
    return value * factors[time_unit]
  raise RuntimeError("Unknown time_unit: %s" % time_unit)
def format_timestamp_12_hour (unix_time, short=False, replace_with="unknown") :
  """Format a Unix time as a local 12-hour timestamp; returns replace_with
  when unix_time is None.  short=True uses a compact dd-mm-yy form."""
  if unix_time is None:
    return replace_with
  fmt = "%d-%m-%y %I:%M %p" if short else "%b %d %Y %I:%M %p"
  return time.strftime(fmt, time.localtime(float(unix_time)))
def format_timestamp_24_hour (unix_time, short=False, replace_with="unknown") :
  """Format a Unix time as a local 24-hour timestamp.

  Mirrors format_timestamp_12_hour: returns replace_with when unix_time is
  None.  (Previously the replace_with argument was ignored and the literal
  "unknown" was always returned — inconsistent with the 12-hour variant.)
  """
  if unix_time is None :
    return replace_with
  elif short :
    return time.strftime("%d-%m-%y %H:%M", time.localtime(float(unix_time)))
  else :
    return time.strftime("%b %d %Y %H:%M", time.localtime(float(unix_time)))
# default timestamp formatter used throughout: the 12-hour-clock variant
format_timestamp = format_timestamp_12_hour
def format_cpu_times(show_micro_seconds_per_tick=True):
  """Format user+system CPU times; append micro-seconds/tick when the
  interpreter provides sys.gettickeraccumulation() (a patched build)."""
  times = os.times()
  user, system = times[0], times[1]
  result = "u+s,u,s: %.2f %.2f %.2f" % (user + system, user, system)
  if show_micro_seconds_per_tick:
    ticker = getattr(sys, "gettickeraccumulation", None)
    if ticker is not None:
      result += " micro-seconds/tick: %.3f" % ((user+system)/ticker()*1.e6)
  return result
def show_total_time(
      out=None,
      show_micro_seconds_per_bytecode_instruction=True):
  """Print the total CPU time (and, on patched interpreters providing
  sys.gettickeraccumulation, the time per bytecode instruction) to out."""
  if (out == None): out = sys.stdout
  total_time = user_plus_sys_time().get()
  try: python_ticker = sys.gettickeraccumulation()
  except AttributeError: pass
  else:
    print >> out, "Time per interpreted Python bytecode instruction:",
    print >> out, "%.3f micro seconds" % (total_time / python_ticker * 1.e6)
  print >> out, "Total CPU time: %.2f %s" % human_readable_time(total_time)
def show_wall_clock_time(seconds, out=None):
  """Print seconds to out as "wall clock time: ...", switching to a
  minutes+seconds breakdown above 120 seconds; flushes out if possible."""
  if (out is None): out = sys.stdout
  print >> out, "wall clock time:",
  if (seconds < 120):
    print >> out, "%.2f seconds" % seconds
  else:
    # +1.e-6 guards against float representation pushing int() down by one
    m = int(seconds / 60 + 1.e-6)
    s = seconds - m * 60
    print >> out, "%d minutes %.2f seconds (%.2f seconds total)" % (
      m, s, seconds)
  out_flush = getattr(out, "flush", None)
  if (out_flush is not None):
    out_flush()
class show_times:
  """Callable that, when invoked, prints usr+sys CPU time (plus ticker
  stats if available) and wall-clock time since construction."""
  def __init__(self, time_start=None, out=None):
    if (time_start is None):
      # estimate process start: now minus CPU time consumed so far
      t = os.times()
      self.time_start = time.time() - (t[0] + t[1])
    elif (time_start == "now"):
      self.time_start = time.time()
    else:
      self.time_start = -(0-time_start) # be sure time_start is a number
    self.out = out
  def __call__(self):
    out = self.out
    if (out is None): out = sys.stdout
    t = os.times()
    usr_plus_sys = t[0] + t[1]
    # ticker stats only exist on patched interpreters
    try: ticks = sys.gettickeraccumulation()
    except AttributeError: ticks = None
    s = "usr+sys time: %.2f seconds" % usr_plus_sys
    if (ticks is not None):
      s += ", ticks: %d" % ticks
      if (ticks != 0):
        s += ", micro-seconds/tick: %.3f" % (usr_plus_sys*1.e6/ticks)
    print >> out, s
    show_wall_clock_time(seconds=time.time()-self.time_start, out=out)
def show_times_at_exit(time_start=None, out=None):
  # register a show_times instance to print timing info at interpreter exit
  atexit.register(show_times(time_start=time_start, out=out))
class host_and_user:
  """Snapshot of host/user identity taken from environment variables,
  plus PID, home directory and SGE/PBS queue information."""
  def __init__(self):
    self.host = os.environ.get("HOST")
    self.hostname = os.environ.get("HOSTNAME")
    self.computername = os.environ.get("COMPUTERNAME")
    self.hosttype = os.environ.get("HOSTTYPE")
    self.processor_architecture = os.environ.get("PROCESSOR_ARCHITECTURE")
    self.machtype = os.environ.get("MACHTYPE")
    self.ostype = os.environ.get("OSTYPE")
    self.vendor = os.environ.get("VENDOR")
    self.user = os.environ.get("USER")
    self.username = os.environ.get("USERNAME")
    self.homedir = None
    # Windows composes the home directory from HOMEDRIVE + HOMEPATH
    if (os.name == "nt") :
      homedrive = os.environ.get("HOMEDRIVE")
      homepath = os.environ.get("HOMEPATH")
      if (not None in [homedrive, homepath]) :
        self.homedir = os.path.join(homedrive, homepath)
    else :
      self.homedir = os.environ.get("HOME")
    # os.getpid may be absent on exotic platforms
    getpid = getattr(os, "getpid", None)
    if (getpid is None):
      self.pid = None
    else:
      self.pid = getpid()
    self.sge_info = sge_utils.info()
    self.pbs_info = pbs_utils.chunk_info()
  def get_user_name (self) :
    # prefer USER (Unix) over USERNAME (Windows)
    if (self.user is not None) :
      return self.user
    else :
      return self.username
  def get_host_name (self) :
    # first of HOST / HOSTNAME / COMPUTERNAME that is set, else None
    if (self.host is not None) :
      return self.host
    elif (self.hostname is not None) :
      return self.hostname
    elif (self.computername is not None) :
      return self.computername
    return None
  def show(self, out=None, prefix=""):
    """Print all distinct, non-redundant identity fields to out."""
    if (out is None): out = sys.stdout
    if (self.host is not None):
      print >> out, prefix + "HOST =", self.host
    if (    self.hostname is not None
        and self.hostname != self.host):
      print >> out, prefix + "HOSTNAME =", self.hostname
    if (    self.computername is not None
        and self.computername != self.host):
      print >> out, prefix + "COMPUTERNAME =", self.computername
    if (self.hosttype is not None):
      print >> out, prefix + "HOSTTYPE =", self.hosttype
    if (self.processor_architecture is not None):
      print >> out, prefix + "PROCESSOR_ARCHITECTURE =", \
        self.processor_architecture
    # show MACHTYPE/OSTYPE only when they add information beyond HOSTTYPE
    if (   self.hosttype is None
        or self.machtype is None
        or self.ostype is None
        or "-".join([self.machtype, self.ostype]) != self.hosttype):
      if (self.machtype is not None):
        print >> out, prefix + "MACHTYPE =", \
          self.machtype
      if (self.ostype is not None):
        print >> out, prefix + "OSTYPE =", \
          self.ostype
    if (self.vendor is not None and self.vendor != "unknown"):
      print >> out, prefix + "VENDOR =", \
        self.vendor
    if (self.user is not None):
      print >> out, prefix + "USER =", self.user
    if (    self.username is not None
        and self.username != self.user):
      print >> out, prefix + "USERNAME =", self.username
    if (self.pid is not None):
      print >> out, prefix + "PID =", self.pid
    self.sge_info.show(out=out, prefix=prefix)
    self.pbs_info.show(out=out, prefix=prefix)
def allow_delete_directory (target_dir) :
  """
  Check for specified reserved directories which are standard on many systems;
  these should never be deleted as part of any program.

  Returns False when target_dir is one of the protected locations.
  """
  home = host_and_user().homedir
  protected = [
    home,
    os.path.join(home, "Documents"),
    os.path.join(home, "Desktop"),
    os.path.join(home, "Downloads"),
    os.path.join(home, "Library"),
    os.path.join(home, "Movies"),
    os.path.join(home, "data"),
    "/",
    "/home",
    "/Users",
  ]
  return os.path.abspath(target_dir) not in protected
def _indentor_write_loop(write_method, indent, incomplete_line, lines):
for line in lines:
if (len(line) == 0):
incomplete_line = False
elif (incomplete_line):
write_method(line)
incomplete_line = False
else:
write_method(indent)
write_method(line)
write_method("\n")
class indentor(object):
  """File-like wrapper that prefixes every written line with an indent.

  Tracks whether the last write ended mid-line (incomplete_line) so the
  indent is only emitted at true line starts.  shift_right() produces a
  child indentor with a deeper indent writing to the same stream.
  """
  def __init__(self, file_object=None, indent="", parent=None):
    if (file_object is None):
      if (parent is None):
        file_object = sys.stdout
      else:
        file_object = parent.file_object
    self.file_object = file_object
    # expose flush only when the underlying stream supports it
    if (hasattr(self.file_object, "flush")):
      self.flush = self._flush
    self.indent = indent
    self.parent = parent
    self.incomplete_line = False
  def write(self, block):
    """Write block, indenting each new line; remembers whether block left
    the final line unterminated."""
    if (len(block) == 0): return
    if (block.endswith("\n")):
      _indentor_write_loop(
        write_method=self.file_object.write,
        indent=self.indent,
        incomplete_line=self.incomplete_line,
        lines=block.splitlines())
      self.incomplete_line = False
    else:
      lines = block.splitlines()
      if (len(lines) == 1):
        # single unterminated fragment: indent only at a fresh line start
        if (self.incomplete_line):
          self.file_object.write(lines[-1])
        else:
          self.file_object.write(self.indent + lines[-1])
      else:
        # all complete lines go through the loop; the last stays open
        _indentor_write_loop(
          write_method=self.file_object.write,
          indent=self.indent,
          incomplete_line=self.incomplete_line,
          lines=lines[:-1])
        self.file_object.write(self.indent + lines[-1])
      self.incomplete_line = True
  def _flush(self):
    self.file_object.flush()
  def shift_right(self, indent="  "):
    # child indentor: same stream, indent deepened by `indent`
    return self.__class__(indent=self.indent+indent, parent=self)
class buffered_indentor(indentor):
  """indentor variant that queues writes until write_buffer() is called."""
  def __init__(self, file_object=None, indent="", parent=None):
    indentor.__init__(self, file_object, indent, parent)
    self.buffer = []
  def write(self, block):
    # defer output; flushed (parents first) by write_buffer()
    self.buffer.append(block)
  def write_buffer(self):
    if self.parent is not None:
      self.parent.write_buffer()
    for queued in self.buffer:
      indentor.write(self, queued)
    self.buffer = []
class null_out(object):
  """Pseudo-filehandle for suppressing printed output."""
  def isatty(self):
    return False
  def close(self):
    pass
  def flush(self):
    pass
  def write(self, str):
    pass
  def writelines(self, sequence):
    pass
class raise_if_output(object):
  "example use: sys.stdout = raise_if_output()"
  def isatty(self):
    return False
  def close(self):
    pass
  def flush(self):
    pass
  def write(self, str):
    # any attempt to produce output is an error by construction
    raise RuntimeError
  def writelines(self, sequence):
    raise RuntimeError
class multi_out(object):
  """
  Multiplexing output stream, e.g. for simultaneously printing to stdout
  and a logfile.
  """
  def __init__(self):
    self.labels = []
    self.file_objects = []
    self.atexit_send_to = []
    self.closed = False
    self.softspace = 0
    atexit.register(self._atexit)
  def _atexit(self):
    # forward still-buffered StringIO contents at interpreter exit
    if self.closed:
      return
    for stream, destination in zip(self.file_objects, self.atexit_send_to):
      if destination is not None:
        destination.write(stream.getvalue())
  def register(self, label, file_object, atexit_send_to=None):
    """Adds an output stream to the list."""
    assert not self.closed
    self.labels.append(label)
    self.file_objects.append(file_object)
    self.atexit_send_to.append(atexit_send_to)
    return self
  def replace_stringio(self,
        old_label,
        new_label,
        new_file_object,
        new_atexit_send_to=None):
    # swap a StringIO placeholder for a real stream, carrying content over
    slot = self.labels.index(old_label)
    buffered = self.file_objects[slot]
    new_file_object.write(buffered.getvalue())
    buffered.close()
    self.labels[slot] = new_label
    self.file_objects[slot] = new_file_object
    self.atexit_send_to[slot] = new_atexit_send_to
  def isatty(self):
    return False
  def close(self):
    # never close the interpreter's own stdout/stderr
    for stream in self.file_objects:
      if stream is sys.__stdout__ or stream is sys.__stderr__:
        continue
      stream.close()
    self.closed = True
  def flush(self):
    for stream in self.file_objects:
      flush = getattr(stream, "flush", None)
      if flush is not None:
        flush()
  def write(self, str):
    for stream in self.file_objects:
      stream.write(str)
  def writelines(self, sequence):
    for stream in self.file_objects:
      stream.writelines(sequence)
def write_this_is_auto_generated(f, file_name_generator):
  """Write a C-style "do not edit" banner naming the generator to f."""
  print >> f, """\
/* *****************************************************
   THIS IS AN AUTOMATICALLY GENERATED FILE. DO NOT EDIT.
   *****************************************************
   Generated by:
     %s
 */
""" % file_name_generator
class import_python_object:
  """Resolve a dotted import path (e.g. "pkg.mod.obj") to a Python object.

  On success the instance exposes .object, .path_elements, .module_path
  and .module.  error_prefix/target_must_be/where_str are spliced into
  the exception messages.
  """
  def __init__(self, import_path, error_prefix, target_must_be, where_str):
    path_elements = import_path.split(".")
    if (len(path_elements) < 2):
      raise ValueError(
        '%simport path "%s" is too short%s%s' % (
          error_prefix, import_path, target_must_be, where_str))
    module_path = ".".join(path_elements[:-1])
    try:
      module = __import__(module_path)
    except ImportError:
      raise ImportError("%sno module %s%s" % (
        error_prefix, module_path, where_str))
    # __import__ returns the top-level package; walk down to the leaf module
    for attr in path_elements[1:-1]:
      module = getattr(module, attr)
    try:
      self.object = getattr(module, path_elements[-1])
    except AttributeError:
      raise AttributeError(
        '%sobject "%s" not found in module "%s"%s' % (
          error_prefix, path_elements[-1], module_path, where_str))
    self.path_elements = path_elements
    self.module_path = module_path
    self.module = module
class input_with_prompt(object):
  """Prompt for a line of input (with readline editing when available),
  temporarily limiting traceback output; result is in .input."""
  def __init__(self, prompt, tracebacklimit=0):
    # readline (if present) enables line editing/history for raw_input
    try: import readline
    except Exception: pass
    # remember prior sys.tracebacklimit (may be unset) for restoration
    try: self.previous_tracebacklimit = sys.tracebacklimit
    except Exception: self.previous_tracebacklimit = None
    if (tracebacklimit is not None):
      sys.tracebacklimit = tracebacklimit
    self.input = raw_input(prompt)
  def __del__(self):
    # restore the traceback limit when this helper goes away
    if (self.previous_tracebacklimit is None):
      del sys.tracebacklimit
    else:
      sys.tracebacklimit = self.previous_tracebacklimit
def count_max(assert_less_than):
  """Generator yielding None forever, but asserting it is resumed fewer
  than assert_less_than times — a runaway-loop guard."""
  n_resumed = 0
  while True:
    yield None
    n_resumed += 1
    assert n_resumed < assert_less_than
class detect_binary_file(object):
  """Heuristic text-vs-binary classifier: examine up to monitor_initial
  characters and flag the input as binary when more than
  max_fraction_non_ascii of them fall outside the printable-ASCII range.

  NOTE(review): characters are inspected with ord(c), i.e. the input block
  is expected to be a (byte) string yielding 1-char items — under Python 3
  a bytes object would yield ints; confirm call sites if porting.
  """
  def __init__(self, monitor_initial=None, max_fraction_non_ascii=None):
    if (monitor_initial is None):
      self.monitor_initial = 1000
    else:
      self.monitor_initial = monitor_initial
    if (max_fraction_non_ascii is None):
      self.max_fraction_non_ascii = 0.05
    else:
      self.max_fraction_non_ascii = max_fraction_non_ascii
    self.n_ascii_characters = 0
    self.n_non_ascii_characters = 0
    # None until enough characters have been seen to decide
    self.status = None
  def is_binary_file(self, block):
    """Feed more data; returns None until monitor_initial characters have
    been counted, then True (binary) or False (text)."""
    if (self.monitor_initial > 0):
      for c in block:
        # printable range: code points 2..127 count as ascii
        if (1 < ord(c) < 128):
          self.n_ascii_characters += 1
        else:
          self.n_non_ascii_characters += 1
        self.monitor_initial -= 1
        if (self.monitor_initial == 0):
          if (  self.n_non_ascii_characters
              > self.n_ascii_characters * self.max_fraction_non_ascii):
            self.status = True
          else:
            self.status = False
          break
    return self.status
  def from_initial_block(
        file_name,
        monitor_initial=None,
        max_fraction_non_ascii=None):
    """One-shot classification of the initial block of file_name; empty
    files are reported as text (False)."""
    detector = detect_binary_file(
      monitor_initial=monitor_initial,
      max_fraction_non_ascii=max_fraction_non_ascii)
    block = open(file_name, "rb").read(detector.monitor_initial)
    if (len(block) == 0): return False
    # short files: decide on whatever was actually read
    detector.monitor_initial = min(len(block), detector.monitor_initial)
    return detector.is_binary_file(block=block)
  from_initial_block = staticmethod(from_initial_block)
def search_for(
      pattern,
      mode,
      re_flags=0,
      lines=None,
      file_name=None):
  """Return the lines matching pattern according to mode.

  mode selects the test: "==", "find" (substring), "startswith",
  "endswith", "re.search" or "re.match".  Exactly one of lines/file_name
  must be given; file_name is read and split into lines.
  """
  assert mode in ["==", "find", "startswith", "endswith", "re.search", "re.match"]
  assert [lines, file_name].count(None) == 1
  if lines is None:
    lines = open(file_name).read().splitlines()
  if mode == "==":
    predicate = lambda l: l == pattern
  elif mode == "startswith":
    predicate = lambda l: l.startswith(pattern)
  elif mode == "endswith":
    predicate = lambda l: l.endswith(pattern)
  elif mode == "find":
    predicate = lambda l: l.find(pattern) >= 0
  elif mode == "re.search":
    import re
    predicate = lambda l: re.search(
      pattern=pattern, string=l, flags=re_flags) is not None
  else:
    import re
    predicate = lambda l: re.match(
      pattern=pattern, string=l, flags=re_flags) is not None
  return [l for l in lines if predicate(l)]
class progress_displayed_as_fraction(object):
  """Console progress indicator rewriting one line as "i / n"."""
  def __init__(self, n):
    self.n = n
    self.i = 0
    if self.n == 1:
      # a single-step task needs no progress display
      self.advance = lambda: None
    self.advance()
  def advance(self):
    # carriage return rewrites the same console line each step
    if self.i > 0:
      sys.stdout.write('\r')
    sys.stdout.write("%i / %i" % (self.i, self.n))
    sys.stdout.flush()
    self.i += 1
  def done(self):
    if self.n == 1:
      return
    sys.stdout.write("\n")
    sys.stdout.flush()
class progress_bar(progress_displayed_as_fraction):
  """Progress rendered as a |===>   | bar instead of a fraction."""
  def advance(self):
    parts = ['|']
    if self.i > 0:
      parts.extend(['='] * (self.i - 1))
      parts.append('>')
    parts.extend(' ' * (self.n - self.i))
    parts.append('|\r')
    sys.stdout.write(''.join(parts))
    sys.stdout.flush()
    self.i += 1
def format_float_with_standard_uncertainty(value, standard_uncertainty):
  """Format value with its standard uncertainty in parenthesis notation,
  e.g. 1.234(10); negligible uncertainty yields str(value)."""
  if standard_uncertainty < 1e-16:
    return str(value)
  precision = -int(round(math.log10(standard_uncertainty)))
  if precision > -1:
    scaled_su = standard_uncertainty * math.pow(10, precision)
    if round(scaled_su, 1) < 2:
      # fewer than two significant digits in the s.u.: show one more place
      scaled_su *= 10
      precision += 1
    return ("%%.%if(%%i)" % precision) % (value, round(scaled_su))
  precision += 1
  rounded_su = int(round(standard_uncertainty, precision))
  return "%.0f(%i)" % (round(value, precision), rounded_su)
def random_hex_code(number_of_digits):
  """Return a random hexadecimal string of the given length.

  Uses the random module, so NOT suitable for security tokens (use the
  secrets module for those).
  """
  import random
  # range instead of the Python-2-only xrange; equivalent behavior and
  # portable to Python 3
  return "".join(
    random.choice("0123456789abcdef") for _ in range(number_of_digits))
def get_svn_revision(path=None):
  """Return the subversion revision number for path, or None.

  Defaults to the libtbx distribution directory.  Adapted from
  django/utils/version.py.
  """
  if path is None:
    import libtbx.load_env
    path = op.dirname(libtbx.env.dist_path(module_name="libtbx"))
  try:
    entries = open('%s/.svn/entries' % path, 'r').read()
  except IOError:
    return None
  # Versions >= 7 of the entries file are flat text.  The first line is
  # the version number; the next set of digits after 'dir' is the revision.
  if re.match(r'(\d+)', entries):
    rev_match = re.search(r'\d+\s+dir\s+(\d+)', entries)
    if rev_match:
      return int(rev_match.groups()[0])
  return None
def get_build_tag(path=None):
  """Return the first line of the TAG file under path (default: the libtbx
  distribution directory), stripped; None when the file is absent."""
  if path is None:
    import libtbx.load_env
    path = op.dirname(libtbx.env.dist_path(module_name="libtbx"))
  tag_file_path = "%s/TAG" %path
  if not op.exists(tag_file_path):
    return None
  return open(tag_file_path).readline().strip()
def getcwd_safe () :
  """Return os.getcwd(), converting the "directory deleted/unmounted"
  OSError (errno 2) into a user-facing Sorry."""
  try :
    cwd = os.getcwd()
  except OSError, e :
    if (e.errno == 2) :
      raise Sorry("Could not determine the current working directory because "+
        "it has been deleted or unmounted.")
    else :
      # any other failure is unexpected: propagate unchanged
      raise e
  return cwd
def getcwd_or_default (default=None) :
  """Return os.getcwd(), falling back to default (or the user's home
  directory) when the current directory no longer exists (errno 2)."""
  if (default is None) :
    if (os.name == "nt") :
      # Windows: home is HOMEDRIVE + HOMEPATH
      home_drive = os.environ.get("HOMEDRIVE", "C:")
      home_dir = os.environ.get("HOMEPATH", "\\")
      default = home_drive + home_dir
    else :
      default = os.environ.get("HOME", "/")
  try :
    cwd = os.getcwd()
  except OSError, e :
    if (e.errno == 2) :
      cwd = default
    else :
      raise e
  return cwd
def create_run_directory (prefix, default_directory_number=None) :
  """
  Create a program output directory using sequential numbering, picking the
  highest run ID. In other words, if the prefix is 'Refine' and the current
  directory contains subdirectories named Refine_2 and Refine_9, the new
  directory will be Refine_10.
  """
  run_number = default_directory_number
  if run_number is None:
    existing_ids = []
    for entry in os.listdir(os.getcwd()):
      if os.path.isdir(entry) and entry.startswith(prefix):
        suffix = entry.split("_")[-1]
        if suffix.isdigit():
          existing_ids.append(int(suffix))
    # clamp to at least 1 so the first directory is <prefix>_1
    run_number = max(max(existing_ids) + 1, 1) if existing_ids else 1
  dir_name = prefix + "_" + str(run_number)
  if os.path.isdir(dir_name):
    raise OSError("The directory %s already exists."%os.path.abspath(dir_name))
  os.makedirs(dir_name)
  return os.path.abspath(dir_name)
class tmp_dir_wrapper (object) :
  """
  Convenience methods for running in a (presumably empty) temporary directory
  and copying all files to another directory. Can be used whether or not the
  temporary directory is actually defined; if None, no action will be taken.
  Otherwise, both tmp_dir and dest_dir (default is current directory) must be
  existing paths.
  """
  def __init__ (self, tmp_dir, dest_dir=None, out=sys.stdout) :
    if (dest_dir is None) :
      dest_dir = os.getcwd()
    self.tmp_dir = tmp_dir
    self.dest_dir = dest_dir
    if (tmp_dir is None) :
      # disabled mode: no chdir, transfer_files() becomes a no-op
      pass
    elif (not os.path.isdir(tmp_dir)) :
      raise Sorry("The temporary directory %s does not exist." % tmp_dir)
    else :
      if (not os.path.isdir(dest_dir)) :
        raise Sorry("The destination directory %s does not exist." % dest_dir)
      print >> out, "Changing working directory to %s" % tmp_dir
      print >> out, "Ultimate destination is %s" % dest_dir
      os.chdir(tmp_dir)
  def transfer_files (self, out=sys.stdout) :
    """Copy everything from tmp_dir to dest_dir; returns False when the
    wrapper is disabled (tmp_dir is None), True otherwise."""
    if (self.tmp_dir is None) : return False
    assert os.path.isdir(self.dest_dir)
    files = os.listdir(self.tmp_dir)
    print >> out, "Copying all output files to %s" % self.dest_dir
    for file_name in files :
      print >> out, "  ... %s" % file_name
      shutil.copy(os.path.join(self.tmp_dir, file_name), self.dest_dir)
    print >> out, ""
    return True
def show_development_warning (out=sys.stdout) :
  """Print the standard boxed "experimental program" warning to out."""
  print >> out, """
  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
  !!                  WARNING - EXPERIMENTAL PROGRAM                        !!
  !!                                                                        !!
  !! This program is still in development - some functionality may be       !!
  !! missing and/or untested. Use at your own risk!  For bug reports, etc.  !!
  !! email bugs@phenix-online.org.                                          !!
  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def check_if_output_directory_exists (file_name=None, dir_name=None) :
  """Raise Sorry unless the output directory (or the directory component
  of file_name) exists; additionally warn when it lies inside Dropbox."""
  if file_name is not None:
    assert dir_name is None
    dir_name = os.path.dirname(file_name)
    if dir_name == "":
      return
  if dir_name is None:
    raise Sorry("No output directory specified.")
  if not op.isdir(dir_name):
    raise Sorry(("The specified output directory (%s) does not exist or "+
      "is not a directory.") % dir_name)
  # XXX writing to Dropbox folders is generally not a good idea
  head, tail = os.path.split(dir_name)
  while tail != "":
    if tail == "Dropbox":
      warnings.warn("You are directing output to a Dropbox directory.  "+
        "Please note that this is not guaranteed to work in all cases; "+
        "use at your own risk.", UserWarning)
    head, tail = os.path.split(head)
def concatenate_python_script (out, file_name) :
  """
  Insert a Python script into an existing file, removing any __future__
  import to prevent syntax errors.  (This could be dangerous in most contexts
  but is required for some of our Coot-related scripts to work.)
  """
  data = open(file_name, "r").read()
  print >> out, ""
  print >> out, "#--- script copied from %s" % os.path.basename(file_name)
  for line in data.splitlines() :
    # __future__ imports are only legal at the top of a file, so they must
    # be stripped when splicing into the middle of another script
    if line.startswith("from __future__") :
      continue
    else :
      print >> out, line
  print >> out, "#--- end"
  print >> out, ""
def greek_time(secs):
  """Scale a time in seconds up to a value > 1 where possible, returning
  (value, prefix) with prefix in "", "milli", "micro", "nano"."""
  scaled = secs
  for prefix in ("", "milli", "micro", "nano"):
    if scaled > 1:
      break
    scaled *= 1000
  return scaled, prefix
###########################
# URL retrieval functions #
###########################
# module-level handle on the installed urllib2 proxy, set by
# install_urllib_http_proxy (None until a proxy has been installed)
libtbx_urllib_proxy = None
def install_urllib_http_proxy (server, port=80, user=None, password=None) :
  """Install a global urllib2 HTTP proxy (optionally with basic auth) and
  remember it in libtbx_urllib_proxy; returns the proxy handler."""
  global libtbx_urllib_proxy
  import urllib2
  if (user is None) :
    proxy = urllib2.ProxyHandler({'http': '%s:%d' % (server, port) })
    opener = urllib2.build_opener(proxy)
  else :
    # embed credentials in the proxy URL and add a basic-auth handler
    proxy = urllib2.ProxyHandler({
      'http': 'http://%s:%s@%s:%s' % (user, password, server, port),
    })
    auth = urllib2.HTTPBasicAuthHandler()
    opener = urllib2.build_opener(proxy, auth, urllib2.HTTPHandler)
  libtbx_urllib_proxy = proxy
  urllib2.install_opener(opener)
  print "Installed urllib2 proxy at %s:%d" % (server, port)
  return proxy
def urlopen (*args, **kwds) :
  """
  Substitute for urllib2.urlopen, with automatic HTTP proxy configuration
  if specific environment variables are defined.

  Reads CCTBX_HTTP_PROXY_SERVER/PORT/USER/PASSWORD; a user name without a
  password raises Sorry.
  """
  if ("CCTBX_HTTP_PROXY" in os.environ) and (libtbx_urllib_proxy is None) :
    server = os.environ["CCTBX_HTTP_PROXY_SERVER"]
    port = os.environ.get("CCTBX_HTTP_PROXY_PORT", 80)
    user = os.environ.get("CCTBX_HTTP_PROXY_USER", None)
    passwd = os.environ.get("CCTBX_HTTP_PROXY_PASSWORD", None)
    # fixed NameError: this check (and the call below) previously
    # referenced an undefined name "password" instead of the local "passwd"
    if (user is not None) and (passwd is None) :
      raise Sorry("You have defined a user name for the HTTP proxy, but "+
        "no password was specified.  Please set the environment variable "+
        "CCTBX_HTTP_PROXY_PASSWORD.")
    install_urllib_http_proxy(
      server=server,
      port=port,
      user=user,
      password=passwd)
  import urllib2
  return urllib2.urlopen(*args, **kwds)
class download_progress (object) :
  """
  Simple proxy for displaying download status - here with methods for
  writing to the console, but can be subclassed and used for graphical display.
  """
  def __init__ (self, log=None, n_kb_total=None) :
    if log is None:
      log = null_out()
    self.log = log
    self.n_kb_total = n_kb_total
    self.n_kb_elapsed = 0
  def set_total_size (self, n_kb_total) :
    # (re)start tracking for a download of known size
    self.n_kb_total = n_kb_total
    self.n_kb_elapsed = 0
  def increment (self, n_kb) :
    """Advance by n_kb and refresh the display; returns the display's
    continue/abort flag."""
    assert (self.n_kb_total is not None)
    self.n_kb_elapsed += n_kb
    return self.show_progress()
  def show_progress (self) :
    # \r rewrites the same console line; returning True means "continue"
    self.log.write("\r%d/%d KB downloaded" % (self.n_kb_elapsed,
      self.n_kb_total))
    self.log.flush()
    return True
  def percent_finished (self) :
    assert (self.n_kb_total is not None)
    return 100 * min(1.0, self.n_kb_elapsed / self.n_kb_total)
  def complete (self) :
    self.log.write("\rDownload complete")
  def run_continuously (self) :
    """
    Placeholder for cases where the download is not being run asynchronously.
    """
    pass
class download_target (object) :
"""
Flexible callable object for retrieving a file from a URL, with optional
HTTPS authentication. Designed to be runnable in a separate thread with
graphical progress update.
Note that in some circumstances SSL support may be missing from the socket
module, in which case we use 'curl' to download securely. (This will not
work on Windows, obviously.)
"""
def __init__ (self,
url,
file_name,
use_curl=None, # SSL only
user=None, # SSL only
password=None, # SSL only
base_url=None) : # SSL only
self.url = url
self.file_name = file_name
self.use_curl = use_curl
self.user = user
self.password = password
self.base_url = base_url
if (not None in [self.user, self.password]) :
assert (self.base_url is not None)
import socket
if ((not self.use_curl) and (hasattr(socket, "ssl")) and
(hasattr(socket.ssl, "__call__"))) :
self.use_curl = False
else :
self.use_curl = True
def __call__ (self, log=None, progress_meter=None) :
if (log is None) :
log = null_out()
if (progress_meter is None) :
progress_meter = download_progress(log=log)
from libtbx import easy_run
import urllib2
file_name = self.file_name # return value
if (not self.use_curl) :
if (not None in [self.user, self.password]) :
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, self.base_url, params.user, params.password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
req = urllib2.urlopen(self.url)
info = req.info()
n_kb_total = int(info['Content-length']) / 1024
progress_meter.set_total_size(n_kb_total)
# TODO adjust chunk size automatically based on download speed
n_kb_chunk = getattr(self, "n_kb_chunk", 512)
chunksize = n_kb_chunk * 1024
fp = open(self.file_name, 'wb')
while True:
chunk = req.read(chunksize)
if not chunk: break
if not progress_meter.increment(n_kb_chunk) :
file_name = None
break
fp.write(chunk)
fp.close()
progress_meter.complete()
else :
progress_meter.run_continuously()
if (not None in [self.user, self.password]) :
curl_args = "--user %s:%s" % (self.user, self.password)
rc = easy_run.call("curl %s \"%s\" -o %s" % (curl_args, self.url,
self.file_name))
progress_meter.complete()
if (rc != 0) :
raise RuntimeError("curl exited with code %d" % rc)
if (file_name is None) :
return None
return op.abspath(self.file_name)
| mit | 215,904,346,378,941,440 | 29.949574 | 82 | 0.625055 | false | 3.348214 | false | false | false | 0.021456 |
GiedriusM/openthread | tests/scripts/thread-cert/Cert_9_2_17_Orphan.py | 5 | 3760 | #!/usr/bin/python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
CHANNEL1 = 11
CHANNEL2 = 18
CHANNEL_MASK = 1 << 18
PANID_INIT = 0xface
LEADER1 = 1
LEADER2 = 2
ED1 = 3
class Cert_9_2_17_Orphan(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,4):
self.nodes[i] = node.Node(i)
self.nodes[LEADER1].set_active_dataset(10, channel=CHANNEL1, panid=PANID_INIT, channel_mask=CHANNEL_MASK)
self.nodes[LEADER1].set_mode('rsdn')
self.nodes[LEADER1].add_whitelist(self.nodes[ED1].get_addr64())
self.nodes[LEADER1].enable_whitelist()
self.nodes[LEADER1].set_router_selection_jitter(1)
self.nodes[LEADER2].set_active_dataset(20, channel=CHANNEL2, panid=PANID_INIT, channel_mask=CHANNEL_MASK)
self.nodes[LEADER2].set_mode('rsdn')
self.nodes[LEADER2].enable_whitelist()
self.nodes[LEADER2].set_router_selection_jitter(1)
self.nodes[ED1].set_active_dataset(10, channel=CHANNEL1, panid=PANID_INIT, channel_mask=CHANNEL_MASK)
self.nodes[ED1].set_mode('rsn')
self.nodes[ED1].add_whitelist(self.nodes[LEADER1].get_addr64())
self.nodes[ED1].enable_whitelist()
self.nodes[ED1].set_timeout(3)
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER1].start()
self.nodes[LEADER1].set_state('leader')
self.assertEqual(self.nodes[LEADER1].get_state(), 'leader')
self.nodes[LEADER2].start()
self.nodes[LEADER2].set_state('leader')
self.assertEqual(self.nodes[LEADER2].get_state(), 'leader')
self.nodes[ED1].start()
time.sleep(5)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
self.nodes[LEADER1].stop()
self.nodes[LEADER2].add_whitelist(self.nodes[ED1].get_addr64())
self.nodes[ED1].add_whitelist(self.nodes[LEADER2].get_addr64())
time.sleep(20)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
self.assertEqual(self.nodes[ED1].get_channel(), CHANNEL2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 4,534,714,539,930,546,000 | 39 | 113 | 0.694681 | false | 3.574144 | false | false | false | 0.001596 |
aqualid/aqualid | aql/utils/aql_temp_file.py | 1 | 3463 | #
# Copyright (c) 2014-2015 The developers of Aqualid project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import tempfile
import errno
import shutil
__all__ = (
'Tempfile', 'Tempdir',
)
# ==============================================================================
class Tempfile (str):
def __new__(cls, prefix='tmp', suffix='', root_dir=None, mode='w+b'):
handle = tempfile.NamedTemporaryFile(mode=mode, suffix=suffix,
prefix=prefix, dir=root_dir,
delete=False)
self = super(Tempfile, cls).__new__(cls, handle.name)
self.__handle = handle
return self
def __enter__(self):
return self
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_value, traceback):
self.remove()
def write(self, data):
self.__handle.write(data)
def read(self, data):
self.__handle.read(data)
def seek(self, offset):
self.__handle.seek(offset)
def tell(self):
return self.__handle.tell()
def flush(self):
if self.__handle is not None:
self.__handle.flush()
def close(self):
if self.__handle is not None:
self.__handle.close()
self.__handle = None
return self
def remove(self):
self.close()
try:
os.remove(self)
except OSError as ex:
if ex.errno != errno.ENOENT:
raise
return self
# ==============================================================================
class Tempdir(str):
def __new__(cls, prefix='tmp', suffix='', root_dir=None, name=None):
if root_dir is not None:
if not os.path.isdir(root_dir):
os.makedirs(root_dir)
if name is None:
path = tempfile.mkdtemp(prefix=prefix, suffix=suffix, dir=root_dir)
else:
if root_dir is not None:
name = os.path.join(root_dir, name)
path = os.path.abspath(name)
if not os.path.isdir(path):
os.makedirs(path)
return super(Tempdir, cls).__new__(cls, path)
def __enter__(self):
return self
# noinspection PyUnusedLocal
def __exit__(self, exc_type, exc_value, traceback):
self.remove()
def remove(self):
shutil.rmtree(self, ignore_errors=False)
| mit | 4,742,429,209,160,121,000 | 28.347458 | 80 | 0.585908 | false | 4.366961 | false | false | false | 0 |
RexFuzzle/sfepy | sfepy/discrete/fem/periodic.py | 1 | 2761 | import numpy as nm
from sfepy.discrete.fem.mesh import find_map
##
# c: 05.05.2008, r: 05.05.2008
eps = 1e-12
def set_accuracy( eps ):
globals()['eps'] = eps
##
# c: 18.10.2006, r: 05.05.2008
def match_grid_line( coor1, coor2, which ):
"""
Match coordinates `coor1` with `coor2` along the axis `which`.
"""
if coor1.shape != coor2.shape:
raise ValueError, 'incompatible shapes: %s == %s'\
% ( coor1.shape, coor2.shape)
c1 = coor1[:,which]
c2 = coor2[:,which]
i1 = nm.argsort( c1 )
i2 = nm.argsort( c2 )
if not nm.all( nm.abs(c1[i1] - c2[i2]) < eps ):
print c1[i1]
print c2[i2]
print nm.abs(c1[i1] - c2[i2]).max()
raise ValueError('cannot match nodes!')
return i1, i2
##
# 18.10.2006, c
# last revision: 18.10.2006
def match_x_line( coor1, coor2 ):
return match_grid_line( coor1, coor2, 0 )
def match_y_line( coor1, coor2 ):
return match_grid_line( coor1, coor2, 1 )
def match_z_line( coor1, coor2 ):
return match_grid_line( coor1, coor2, 2 )
##
# 01.06.2007, c
# last revision: 01.06.2007
def match_grid_plane( coor1, coor2, which ):
"""
Match coordinates `coor1` with `coor2` along the plane with normal axis
`which`.
"""
if coor1.shape != coor2.shape:
raise ValueError, 'incompatible shapes: %s == %s'\
% ( coor1.shape, coor2.shape)
offset = coor1[0,which] - coor2[0,which]
aux = coor2.copy()
aux[:,which] += offset
i1, i2 = find_map( coor1, aux, join = False )
if i1.shape[0] != coor1.shape[0]:
print coor1[i1]
print coor2[i2]
print nm.abs(coor1[i1] - coor2[i2]).max(0)
ii = nm.setdiff1d(nm.arange(coor1.shape[0]), i1)
print coor1[ii]
print coor2[ii]
raise ValueError('cannot match nodes!')
return i1, i2
##
# 01.06.2007, c
# last revision: 01.06.2007
def match_x_plane( coor1, coor2 ):
return match_grid_plane( coor1, coor2, 0 )
def match_y_plane( coor1, coor2 ):
return match_grid_plane( coor1, coor2, 1 )
def match_z_plane( coor1, coor2 ):
return match_grid_plane( coor1, coor2, 2 )
def match_coors(coors1, coors2):
"""
Match coordinates `coors1` with `coors2`.
"""
if coors1.shape != coors2.shape:
raise ValueError('incompatible shapes: %s == %s'
% (coors1.shape, coors2.shape))
i1, i2 = find_map(coors1, coors2, join=False)
if i1.shape[0] != coors1.shape[0]:
print coors1[i1]
print coors2[i2]
print nm.abs(coors1[i1] - coors2[i2]).max(0)
ii = nm.setdiff1d(nm.arange(coors1.shape[0]), i1)
print coors1[ii]
print coors2[ii]
raise ValueError('cannot match nodes!')
return i1, i2
| bsd-3-clause | -5,476,372,556,632,342,000 | 26.336634 | 75 | 0.587468 | false | 2.657363 | false | false | false | 0.021369 |
resmo/ansible | test/units/module_utils/common/parameters/test_list_no_log_values.py | 22 | 1187 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.parameters import list_no_log_values
@pytest.fixture
def params():
return {
'secret': 'undercookwovennativity',
'other_secret': 'cautious-slate-makeshift',
'state': 'present',
'value': 5,
}
def test_list_no_log_values(params):
argument_spec = {
'secret': {'type': 'str', 'no_log': True},
'other_secret': {'type': 'str', 'no_log': True},
'state': {'type': 'str'},
'value': {'type': 'int'},
}
result = set(('undercookwovennativity', 'cautious-slate-makeshift'))
assert result == list_no_log_values(argument_spec, params)
def test_list_no_log_values_no_secrets(params):
argument_spec = {
'other_secret': {'type': 'str', 'no_log': False},
'state': {'type': 'str'},
'value': {'type': 'int'},
}
result = set()
assert result == list_no_log_values(argument_spec, params)
| gpl-3.0 | -5,025,738,247,655,301,000 | 27.95122 | 92 | 0.601516 | false | 3.199461 | false | false | false | 0.000842 |
datamade/yournextmp-popit | elections/illinois_state_primary_2016/management/commands/illinois_state_primary_2016_save_geojson.py | 1 | 1221 | import json
from os.path import join, dirname, abspath
from django.core.management.base import BaseCommand
import requests
from elections.models import Election
class Command(BaseCommand):
help = 'Create posts and elections for the 2016 Illinois General Primary'
def handle(self, **options):
elections = Election.objects.prefetch_related('posts').prefetch_related('posts__base')
geojson = {}
for election in elections:
geojson[election.slug] = {
'type': 'FeatureCollection',
'features': [],
}
for post in election.posts.all():
feature = {
'type': 'Feature',
'geometry': json.loads(post.base.area.geom),
'properties': {'label': post.base.label, 'id': post.base.area.name}
}
geojson[election.slug]['features'].append(feature)
output_path = abspath(join(dirname(__file__), '..', '..', 'static'))
for election_slug, geom in geojson.items():
with open('{0}/{1}.geojson'.format(output_path, election_slug), 'w') as f:
f.write(json.dumps(geom))
| agpl-3.0 | -3,500,318,998,682,955,000 | 28.780488 | 94 | 0.560197 | false | 4.224913 | false | false | false | 0.004914 |
jchodera/mdtraj | mdtraj/utils/contextmanagers.py | 16 | 1073 | from __future__ import print_function, division
import os
import time
import shutil
import tempfile
import contextlib
__all__ = ["timing", "enter_temp_directory"]
class timing(object):
"""A timing context manager
Examples
--------
>>> long_function = lambda : None
>>> with timing('long_function'):
... long_function()
long_function: 0.000 seconds
"""
def __init__(self, name='block'):
self.name = name
self.time = 0
self.start = None
self.end = None
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, ty, val, tb):
self.end = time.time()
self.time = self.end - self.start
print("%s: %0.3f seconds" % (self.name, self.time))
return False
@contextlib.contextmanager
def enter_temp_directory():
"""Create and enter a temporary directory; used as context manager."""
temp_dir = tempfile.mkdtemp()
cwd = os.getcwd()
os.chdir(temp_dir)
yield
os.chdir(cwd)
shutil.rmtree(temp_dir)
| lgpl-2.1 | -280,814,763,441,486,080 | 22.326087 | 74 | 0.589003 | false | 3.845878 | false | false | false | 0.001864 |
saisrisathya/whatsapps | build/lib/yowsup/layers/protocol_media/protocolentities/message_media_downloadable.py | 45 | 3704 | from .message_media import MediaMessageProtocolEntity
from yowsup.common.tools import WATools
import mimetypes
import os
class DownloadableMediaMessageProtocolEntity(MediaMessageProtocolEntity):
'''
<message t="{{TIME_STAMP}}" from="{{CONTACT_JID}}"
offline="{{OFFLINE}}" type="text" id="{{MESSAGE_ID}}" notify="{{NOTIFY_NAME}}">
<media type="{{DOWNLOADABLE_MEDIA_TYPE: (image | audio | video)}}"
mimetype="{{MIME_TYPE}}"
filehash="{{FILE_HASH}}"
url="{{DOWNLOAD_URL}}"
ip="{{IP}}"
size="{{MEDIA SIZE}}"
file="{{FILENAME}}"
> {{THUMBNAIL_RAWDATA (JPEG?)}}
</media>
</message>
'''
def __init__(self, mediaType,
mimeType, fileHash, url, ip, size, fileName,
_id = None, _from = None, to = None, notify = None, timestamp = None,
participant = None, preview = None, offline = None, retry = None):
super(DownloadableMediaMessageProtocolEntity, self).__init__(mediaType, _id, _from, to, notify, timestamp, participant, preview, offline, retry)
self.setDownloadableMediaProps(mimeType, fileHash, url, ip, size, fileName)
def __str__(self):
out = super(DownloadableMediaMessageProtocolEntity, self).__str__()
out += "MimeType: %s\n" % self.mimeType
out += "File Hash: %s\n" % self.fileHash
out += "URL: %s\n" % self.url
out += "IP: %s\n" % self.ip
out += "File Size: %s\n" % self.size
out += "File name: %s\n" % self.fileName
return out
def getMediaSize(self):
return self.size
def getMediaUrl(self):
return self.url
def getMimeType(self):
return self.mimeType
def setDownloadableMediaProps(self, mimeType, fileHash, url, ip, size, fileName):
self.mimeType = mimeType
self.fileHash = fileHash
self.url = url
self.ip = ip
self.size = int(size)
self.fileName = fileName
def toProtocolTreeNode(self):
node = super(DownloadableMediaMessageProtocolEntity, self).toProtocolTreeNode()
mediaNode = node.getChild("media")
mediaNode.setAttribute("mimetype", self.mimeType)
mediaNode.setAttribute("filehash", self.fileHash)
mediaNode.setAttribute("url", self.url)
if self.ip:
mediaNode.setAttribute("ip", self.ip)
mediaNode.setAttribute("size", str(self.size))
mediaNode.setAttribute("file", self.fileName)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = MediaMessageProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = DownloadableMediaMessageProtocolEntity
mediaNode = node.getChild("media")
entity.setDownloadableMediaProps(
mediaNode.getAttributeValue("mimetype"),
mediaNode.getAttributeValue("filehash"),
mediaNode.getAttributeValue("url"),
mediaNode.getAttributeValue("ip"),
mediaNode.getAttributeValue("size"),
mediaNode.getAttributeValue("file")
)
return entity
@staticmethod
def fromFilePath(fpath, url, mediaType, ip, to, mimeType = None, preview = None, filehash = None, filesize = None):
mimeType = mimeType or mimetypes.guess_type(fpath)[0]
filehash = filehash or WATools.getFileHashForUpload(fpath)
size = filesize or os.path.getsize(fpath)
fileName = os.path.basename(fpath)
return DownloadableMediaMessageProtocolEntity(mediaType, mimeType, filehash, url, ip, size, fileName, to = to, preview = preview)
| gpl-3.0 | 3,810,431,545,743,259,600 | 38.827957 | 152 | 0.615011 | false | 4.017354 | false | false | false | 0.015119 |
legastero/AdHoc | adhoc/plugins/xep_0050.py | 1 | 9147 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
import time
from sleekxmpp import Iq
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import ElementBase, ET, register_stanza_plugin
from sleekxmpp.plugins.base import base_plugin
log = logging.getLogger(__name__)
class Command(ElementBase):
name = 'command'
namespace = 'http://jabber.org/protocol/commands'
plugin_attrib = 'command'
interfaces = set(('action', 'sessionid', 'node', 'status',
'actions', 'actions_execute'))
actions = set(('cancel', 'complete', 'execute', 'next', 'prev'))
statuses = set(('canceled', 'completed', 'executing'))
next_actions = set(('prev', 'next', 'complete'))
def get_action(self):
return self._get_attr('action', default='execute')
def set_actions(self, values):
self.del_actions()
if values:
self._set_sub_text('{%s}actions' % self.namespace, '', True)
actions = self.find('{%s}actions' % self.namespace)
for val in values:
if val in self.next_actions:
action = ET.Element('{%s}%s' % (self.namespace, val))
actions.append(action)
def get_actions(self):
actions = []
actions_xml = self.find('{%s}actions' % self.namespace)
if actions_xml is not None:
for action in self.next_actions:
action_xml = actions_xml.find('{%s}%s' % (self.namespace,
action))
if action_xml is not None:
actions.append(action)
return actions
def del_actions(self):
self._del_sub('{%s}actions' % self.namespace)
class xep_0050(base_plugin):
"""
XEP-0050 Ad-Hoc Commands
"""
def plugin_init(self):
self.xep = '0050'
self.description = 'Ad-Hoc Commands'
self.threaded = self.config.get('threaded', True)
self.addCommand = self.add_command
self.getNewSession = self.new_session
self.xmpp.register_handler(
Callback("Ad-Hoc Execute",
StanzaPath('iq@type=set/command'),
self._handle_command))
register_stanza_plugin(Iq, Command)
self.xmpp.add_event_handler('command_execute',
self._handle_command_start,
threaded=self.threaded)
self.xmpp.add_event_handler('command_next',
self._handle_command_next,
threaded=self.threaded)
self.xmpp.add_event_handler('command_cancel',
self._handle_command_cancel,
threaded=self.threaded)
self.xmpp.add_event_handler('command_complete',
self._handle_command_complete,
threaded=self.threaded)
self.commands = {}
self.sessions = {}
def post_init(self):
base_plugin.post_init(self)
self.xmpp['xep_0030'].add_feature(Command.namespace)
def add_command(self, jid, node, name, handler):
if jid is None:
jid = self.xmpp.boundjid.full
self.xmpp['xep_0030'].add_identity(category='automation',
itype='command-list',
name='Ad-Hoc commands',
node=Command.namespace,
jid=jid)
self.xmpp['xep_0030'].add_item(jid=jid,
name=name,
node=Command.namespace,
subnode=node,
ijid=jid)
self.xmpp['xep_0030'].add_identity(category='automation',
itype='command-node',
name=name,
node=node,
jid=jid)
self.xmpp['xep_0030'].add_feature(Command.namespace, None, jid)
self.commands[(jid, node)] = (name, handler)
def new_session(self):
return str(time.time()) + '-' + self.xmpp.new_id()
def _handle_command(self, iq):
self.xmpp.event('command_%s' % iq['command']['action'], iq)
def _handle_command_start(self, iq):
sessionid = self.new_session()
node = iq['command']['node']
key = (iq['to'].full, node)
name, handler = self.commands[key]
initial_session = {'id': sessionid,
'from': iq['from'],
'to': iq['to'],
'payload': None,
'interface': '',
'has_next': False,
'allow_complete': False,
'past': [],
'next': None,
'cancel': None}
session = handler(iq, initial_session)
payload = session['payload']
register_stanza_plugin(Command, payload.__class__)
session['interface'] = payload.plugin_attrib
self.sessions[sessionid] = session
iq.reply()
iq['command']['sessionid'] = sessionid
iq['command']['node'] = node
if session['next'] is None:
iq['command']['actions'] = []
iq['command']['status'] = 'completed'
elif session['has_next']:
if session['allow_complete']:
iq['command']['actions'] = ['next', 'complete']
else:
iq['command']['actions'] = ['next']
iq['command']['status'] = 'executing'
else:
iq['command']['actions'] = ['complete']
iq['command']['status'] = 'executing'
iq['command'].append(payload)
iq.send()
def _handle_command_complete(self, iq):
node = iq['command']['node']
sessionid = iq['command']['sessionid']
session = self.sessions[sessionid]
handler = session['next']
interface = session['interface']
results = iq['command'][interface]
handler(results, session)
iq.reply()
iq['command']['node'] = node
iq['command']['sessionid'] = sessionid
iq['command']['actions'] = []
iq['command']['status'] = 'completed'
iq.send()
del self.sessions[sessionid]
def _handle_command_next(self, iq):
node = iq['command']['node']
sessionid = iq['command']['sessionid']
session = self.sessions[sessionid]
handler = session['next']
interface = session['interface']
results = iq['command'][interface]
session = handler(results, session)
payload = session['payload']
register_stanza_plugin(Command, payload.__class__)
session['interface'] = payload.plugin_attrib
self.sessions[sessionid] = session
register_stanza_plugin(Command, payload.__class__)
iq.reply()
iq['command']['node'] = node
iq['command']['sessionid'] = sessionid
if session['next'] is None:
iq['command']['status'] = 'completed'
iq['command']['actions'] = ['prev']
elif session['has_next']:
iq['command']['status'] = 'executing'
if session['allow_complete']:
iq['command']['actions'] = ['prev', 'next', 'complete']
else:
iq['command']['actions'] = ['prev', 'next']
else:
iq['command']['status'] = 'executing'
iq['command']['actions'] = ['prev', 'complete']
iq['command'].append(payload)
iq.send()
def _handle_command_cancel(self, iq):
node = iq['command']['node']
sessionid = iq['command']['sessionid']
session = self.sessions[sessionid]
handler = session['cancel']
if handler:
handler(iq, session)
try:
del self.sessions[sessionid]
except:
pass
iq.reply()
iq['command']['node'] = node
iq['command']['sessionid'] = sessionid
iq['command']['status'] = 'canceled'
iq.send()
def get_commands(self, jid, **kwargs):
return self.xmpp['xep_0030'].get_items(jid=jid,
node=Command.namespace,
**kwargs)
def run_command(self, jid, node, ifrom=None, **kwargs):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = jid
if ifrom:
iq['from'] = ifrom
iq['command']['node'] = node
iq['command']['action'] = 'execute'
return iq.send(**kwargs)
| mit | -6,480,460,244,589,101,000 | 33.130597 | 73 | 0.497212 | false | 4.318697 | false | false | false | 0.000109 |
Cinntax/home-assistant | homeassistant/components/sigfox/sensor.py | 3 | 5117 | """Sensor for SigFox devices."""
import logging
import datetime
import json
from urllib.parse import urljoin
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = datetime.timedelta(seconds=30)
API_URL = "https://backend.sigfox.com/api/"
CONF_API_LOGIN = "api_login"
CONF_API_PASSWORD = "api_password"
DEFAULT_NAME = "sigfox"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_LOGIN): cv.string,
vol.Required(CONF_API_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sigfox sensor."""
api_login = config[CONF_API_LOGIN]
api_password = config[CONF_API_PASSWORD]
name = config[CONF_NAME]
try:
sigfox = SigfoxAPI(api_login, api_password)
except ValueError:
return False
auth = sigfox.auth
devices = sigfox.devices
sensors = []
for device in devices:
sensors.append(SigfoxDevice(device, auth, name))
add_entities(sensors, True)
def epoch_to_datetime(epoch_time):
"""Take an ms since epoch and return datetime string."""
return datetime.datetime.fromtimestamp(epoch_time).isoformat()
class SigfoxAPI:
"""Class for interacting with the SigFox API."""
def __init__(self, api_login, api_password):
"""Initialise the API object."""
self._auth = requests.auth.HTTPBasicAuth(api_login, api_password)
if self.check_credentials():
device_types = self.get_device_types()
self._devices = self.get_devices(device_types)
def check_credentials(self):
"""Check API credentials are valid."""
url = urljoin(API_URL, "devicetypes")
response = requests.get(url, auth=self._auth, timeout=10)
if response.status_code != 200:
if response.status_code == 401:
_LOGGER.error("Invalid credentials for Sigfox API")
else:
_LOGGER.error(
"Unable to login to Sigfox API, error code %s",
str(response.status_code),
)
raise ValueError("Sigfox integration not set up")
return True
def get_device_types(self):
"""Get a list of device types."""
url = urljoin(API_URL, "devicetypes")
response = requests.get(url, auth=self._auth, timeout=10)
device_types = []
for device in json.loads(response.text)["data"]:
device_types.append(device["id"])
return device_types
def get_devices(self, device_types):
"""Get the device_id of each device registered."""
devices = []
for unique_type in device_types:
location_url = f"devicetypes/{unique_type}/devices"
url = urljoin(API_URL, location_url)
response = requests.get(url, auth=self._auth, timeout=10)
devices_data = json.loads(response.text)["data"]
for device in devices_data:
devices.append(device["id"])
return devices
@property
def auth(self):
"""Return the API authentification."""
return self._auth
@property
def devices(self):
"""Return the list of device_id."""
return self._devices
class SigfoxDevice(Entity):
"""Class for single sigfox device."""
def __init__(self, device_id, auth, name):
"""Initialise the device object."""
self._device_id = device_id
self._auth = auth
self._message_data = {}
self._name = f"{name}_{device_id}"
self._state = None
def get_last_message(self):
"""Return the last message from a device."""
device_url = f"devices/{self._device_id}/messages?limit=1"
url = urljoin(API_URL, device_url)
response = requests.get(url, auth=self._auth, timeout=10)
data = json.loads(response.text)["data"][0]
payload = bytes.fromhex(data["data"]).decode("utf-8")
lat = data["rinfos"][0]["lat"]
lng = data["rinfos"][0]["lng"]
snr = data["snr"]
epoch_time = data["time"]
return {
"lat": lat,
"lng": lng,
"payload": payload,
"snr": snr,
"time": epoch_to_datetime(epoch_time),
}
def update(self):
"""Fetch the latest device message."""
self._message_data = self.get_last_message()
self._state = self._message_data["payload"]
@property
def name(self):
"""Return the HA name of the sensor."""
return self._name
@property
def state(self):
"""Return the payload of the last message."""
return self._state
@property
def device_state_attributes(self):
"""Return other details about the last message."""
return self._message_data
| apache-2.0 | 4,441,586,935,380,872,000 | 30.98125 | 73 | 0.607387 | false | 4.000782 | false | false | false | 0 |
nimeshkumar11/Implementation-of-Adaptive-CoDel-in-ns-3 | src/lr-wpan/bindings/modulegen__gcc_ILP32.py | 14 | 426909 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler for pybindgen that reports wrapper failures as warnings.

    Returning True tells pybindgen the error was handled, so code
    generation continues past wrappers that fail to generate.
    """

    def handle_error(self, wrapper, exception, traceback_):
        """Warn about *exception* raised while generating *wrapper*; keep going."""
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Create and return the root pybindgen module for the lr-wpan bindings.

    The module is named 'ns.lr_wpan' and its wrapped C++ symbols live in
    the '::ns3' namespace.
    """
    return Module('ns.lr_wpan', cpp_namespace='::ns3')
def register_types(module):
root_module = module.get_root()
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanTxOption [enumeration]
module.add_enum('LrWpanTxOption', ['TX_OPTION_NONE', 'TX_OPTION_ACK', 'TX_OPTION_GTS', 'TX_OPTION_INDIRECT'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyOption [enumeration]
module.add_enum('LrWpanPhyOption', ['IEEE_802_15_4_868MHZ_BPSK', 'IEEE_802_15_4_915MHZ_BPSK', 'IEEE_802_15_4_868MHZ_ASK', 'IEEE_802_15_4_915MHZ_ASK', 'IEEE_802_15_4_868MHZ_OQPSK', 'IEEE_802_15_4_915MHZ_OQPSK', 'IEEE_802_15_4_2_4GHZ_OQPSK', 'IEEE_802_15_4_INVALID_PHY_OPTION'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyEnumeration [enumeration]
module.add_enum('LrWpanPhyEnumeration', ['IEEE_802_15_4_PHY_BUSY', 'IEEE_802_15_4_PHY_BUSY_RX', 'IEEE_802_15_4_PHY_BUSY_TX', 'IEEE_802_15_4_PHY_FORCE_TRX_OFF', 'IEEE_802_15_4_PHY_IDLE', 'IEEE_802_15_4_PHY_INVALID_PARAMETER', 'IEEE_802_15_4_PHY_RX_ON', 'IEEE_802_15_4_PHY_SUCCESS', 'IEEE_802_15_4_PHY_TRX_OFF', 'IEEE_802_15_4_PHY_TX_ON', 'IEEE_802_15_4_PHY_UNSUPPORTED_ATTRIBUTE', 'IEEE_802_15_4_PHY_READ_ONLY', 'IEEE_802_15_4_PHY_UNSPECIFIED'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMcpsDataConfirmStatus [enumeration]
module.add_enum('LrWpanMcpsDataConfirmStatus', ['IEEE_802_15_4_SUCCESS', 'IEEE_802_15_4_TRANSACTION_OVERFLOW', 'IEEE_802_15_4_TRANSACTION_EXPIRED', 'IEEE_802_15_4_CHANNEL_ACCESS_FAILURE', 'IEEE_802_15_4_INVALID_ADDRESS', 'IEEE_802_15_4_INVALID_GTS', 'IEEE_802_15_4_NO_ACK', 'IEEE_802_15_4_COUNTER_ERROR', 'IEEE_802_15_4_FRAME_TOO_LONG', 'IEEE_802_15_4_UNAVAILABLE_KEY', 'IEEE_802_15_4_UNSUPPORTED_SECURITY', 'IEEE_802_15_4_INVALID_PARAMETER'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAssociationStatus [enumeration]
module.add_enum('LrWpanAssociationStatus', ['ASSOCIATED', 'PAN_AT_CAPACITY', 'PAN_ACCESS_DENIED', 'ASSOCIATED_WITHOUT_ADDRESS', 'DISASSOCIATED'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPibAttributeIdentifier [enumeration]
module.add_enum('LrWpanPibAttributeIdentifier', ['phyCurrentChannel', 'phyChannelsSupported', 'phyTransmitPower', 'phyCCAMode', 'phyCurrentPage', 'phyMaxFrameDuration', 'phySHRDuration', 'phySymbolsPerOctet'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMacState [enumeration]
module.add_enum('LrWpanMacState', ['MAC_IDLE', 'MAC_CSMA', 'MAC_SENDING', 'MAC_ACK_PENDING', 'CHANNEL_ACCESS_FAILURE', 'CHANNEL_IDLE', 'SET_PHY_TX_ON'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAddressMode [enumeration]
module.add_enum('LrWpanAddressMode', ['NO_PANID_ADDR', 'ADDR_MODE_RESERVED', 'SHORT_ADDR', 'EXT_ADDR'])
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanEdPower [struct]
module.add_class('LrWpanEdPower')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyDataAndSymbolRates [struct]
module.add_class('LrWpanPhyDataAndSymbolRates')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyPibAttributes [struct]
module.add_class('LrWpanPhyPibAttributes')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyPpduHeaderSymbolNumber [struct]
module.add_class('LrWpanPhyPpduHeaderSymbolNumber')
## lr-wpan-spectrum-value-helper.h (module 'lr-wpan'): ns3::LrWpanSpectrumValueHelper [class]
module.add_class('LrWpanSpectrumValueHelper')
## mac16-address.h (module 'network'): ns3::Mac16Address [class]
module.add_class('Mac16Address', import_from_module='ns.network')
## mac16-address.h (module 'network'): ns3::Mac16Address [class]
root_module['ns3::Mac16Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
module.add_class('Mac64Address', import_from_module='ns.network')
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
root_module['ns3::Mac64Address'].implicitly_converts_to(root_module['ns3::Address'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataConfirmParams [struct]
module.add_class('McpsDataConfirmParams')
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataIndicationParams [struct]
module.add_class('McpsDataIndicationParams')
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataRequestParams [struct]
module.add_class('McpsDataRequestParams')
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper::DataLinkType [enumeration]
module.add_enum('DataLinkType', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_LINUX_SLL', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4', 'DLT_NETLINK'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned char, signed char> [class]
module.add_class('SequenceNumber8', import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## traced-value.h (module 'core'): ns3::TracedValue<ns3::LrWpanMacState> [class]
module.add_class('TracedValue', template_parameters=['ns3::LrWpanMacState'])
## traced-value.h (module 'core'): ns3::TracedValue<ns3::LrWpanPhyEnumeration> [class]
module.add_class('TracedValue', template_parameters=['ns3::LrWpanPhyEnumeration'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## lr-wpan-helper.h (module 'lr-wpan'): ns3::LrWpanHelper [class]
module.add_class('LrWpanHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag [class]
module.add_class('LrWpanLqiTag', parent=root_module['ns3::Tag'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader [class]
module.add_class('LrWpanMacHeader', parent=root_module['ns3::Header'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacType [enumeration]
module.add_enum('LrWpanMacType', ['LRWPAN_MAC_BEACON', 'LRWPAN_MAC_DATA', 'LRWPAN_MAC_ACKNOWLEDGMENT', 'LRWPAN_MAC_COMMAND', 'LRWPAN_MAC_RESERVED'], outer_class=root_module['ns3::LrWpanMacHeader'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::AddrModeType [enumeration]
module.add_enum('AddrModeType', ['NOADDR', 'RESADDR', 'SHORTADDR', 'EXTADDR'], outer_class=root_module['ns3::LrWpanMacHeader'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::KeyIdModeType [enumeration]
module.add_enum('KeyIdModeType', ['IMPLICIT', 'NOKEYSOURCE', 'SHORTKEYSOURCE', 'LONGKEYSOURCE'], outer_class=root_module['ns3::LrWpanMacHeader'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::LrWpanInterferenceHelper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumSignalParameters> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SpectrumSignalParameters', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SpectrumSignalParameters>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## spectrum-phy.h (module 'spectrum'): ns3::SpectrumPhy [class]
module.add_class('SpectrumPhy', import_from_module='ns.spectrum', parent=root_module['ns3::Object'])
## spectrum-signal-parameters.h (module 'spectrum'): ns3::SpectrumSignalParameters [struct]
module.add_class('SpectrumSignalParameters', import_from_module='ns.spectrum', parent=root_module['ns3::SimpleRefCount< ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumSignalParameters> >'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## lr-wpan-csmaca.h (module 'lr-wpan'): ns3::LrWpanCsmaCa [class]
module.add_class('LrWpanCsmaCa', parent=root_module['ns3::Object'])
## lr-wpan-error-model.h (module 'lr-wpan'): ns3::LrWpanErrorModel [class]
module.add_class('LrWpanErrorModel', parent=root_module['ns3::Object'])
## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::LrWpanInterferenceHelper [class]
module.add_class('LrWpanInterferenceHelper', parent=root_module['ns3::SimpleRefCount< ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper> >'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac [class]
module.add_class('LrWpanMac', parent=root_module['ns3::Object'])
## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer [class]
module.add_class('LrWpanMacTrailer', parent=root_module['ns3::Trailer'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy [class]
module.add_class('LrWpanPhy', parent=root_module['ns3::SpectrumPhy'])
## lr-wpan-spectrum-signal-parameters.h (module 'lr-wpan'): ns3::LrWpanSpectrumSignalParameters [struct]
module.add_class('LrWpanSpectrumSignalParameters', parent=root_module['ns3::SpectrumSignalParameters'])
## mac16-address.h (module 'network'): ns3::Mac16AddressChecker [class]
module.add_class('Mac16AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac16-address.h (module 'network'): ns3::Mac16AddressValue [class]
module.add_class('Mac16AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac64-address.h (module 'network'): ns3::Mac64AddressChecker [class]
module.add_class('Mac64AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac64-address.h (module 'network'): ns3::Mac64AddressValue [class]
module.add_class('Mac64AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## lr-wpan-net-device.h (module 'lr-wpan'): ns3::LrWpanNetDevice [class]
module.add_class('LrWpanNetDevice', parent=root_module['ns3::NetDevice'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeCcaConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeCcaConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeCcaConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeEdConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeEdConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeEdConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, unsigned int, ns3::Ptr< ns3::Packet >, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PdDataIndicationCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, unsigned int, ns3::Ptr< ns3::Packet >, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PdDataIndicationCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, unsigned int, ns3::Ptr< ns3::Packet >, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PdDataIndicationCallback&')
typehandlers.add_type_alias(u'ns3::SequenceNumber< short unsigned int, short int >', u'ns3::SequenceNumber16')
typehandlers.add_type_alias(u'ns3::SequenceNumber< short unsigned int, short int >*', u'ns3::SequenceNumber16*')
typehandlers.add_type_alias(u'ns3::SequenceNumber< short unsigned int, short int >&', u'ns3::SequenceNumber16&')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned int, int >', u'ns3::SequenceNumber32')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned int, int >*', u'ns3::SequenceNumber32*')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned int, int >&', u'ns3::SequenceNumber32&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanMacState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::LrWpanMacStateCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanMacState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::LrWpanMacStateCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanMacState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::LrWpanMacStateCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::LrWpanPhyPibAttributes *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeGetAttributeConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::LrWpanPhyPibAttributes *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeGetAttributeConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::LrWpanPhyPibAttributes *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeGetAttributeConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PdDataConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PdDataConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PdDataConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataIndicationParams, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::McpsDataIndicationCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataIndicationParams, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::McpsDataIndicationCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataIndicationParams, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::McpsDataIndicationCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataConfirmParams, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::McpsDataConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataConfirmParams, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::McpsDataConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataConfirmParams, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::McpsDataConfirmCallback&')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned char, signed char >', u'ns3::SequenceNumber8')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned char, signed char >*', u'ns3::SequenceNumber8*')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned char, signed char >&', u'ns3::SequenceNumber8&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeSetAttributeConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeSetAttributeConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeSetAttributeConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeSetTRXStateConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeSetTRXStateConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeSetTRXStateConfirmCallback&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace.

    The namespace currently exposes no types to the bindings; the root
    module is still resolved so this hook mirrors the structure of the
    sibling namespace-registration functions.
    """
    # Result intentionally unused -- kept for symmetry with other hooks.
    top_level = module.get_root()
def register_types_ns3_Hash(module):
    """Register the types exposed from the ns3::Hash namespace.

    Adds the ns3::Hash::Implementation reference-counted base class,
    declares the 32/64-bit hash function-pointer type aliases, and then
    recurses into the nested ns3::Hash::Function namespace.  The base
    class must be added before the nested namespace is registered, since
    the Function hashers derive from Implementation.
    """
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    ## Function-pointer aliases for raw 32-bit hash callbacks (value, pointer, reference forms).
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
    typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
    ## Function-pointer aliases for raw 64-bit hash callbacks (value, pointer, reference forms).
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
    typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hashers in the ns3::Hash::Function namespace.

    Each class derives from ns3::Hash::Implementation, which therefore
    must already be registered on the root module (done by the caller,
    register_types_ns3_Hash) before this function runs.
    """
    root_module = module.get_root()
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
    module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
    module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
    module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
    module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
    """Register type aliases for the ns3::TracedValueCallback namespace.

    Every alias maps a raw C function-pointer signature of the form
    ``void (*)(oldValue, newValue)`` onto a named TracedValueCallback
    typedef, in the three forms pybindgen needs (value, pointer,
    reference).  Covers the core scalar types plus the lr-wpan specific
    LrWpanPhyEnumeration and LrWpanMacState state-change callbacks.
    """
    # root_module is fetched for parity with the other namespace hooks;
    # only typehandlers aliases are added here.
    root_module = module.get_root()
    typehandlers.add_type_alias(u'void ( * ) ( bool, bool ) *', u'ns3::TracedValueCallback::Bool')
    typehandlers.add_type_alias(u'void ( * ) ( bool, bool ) **', u'ns3::TracedValueCallback::Bool*')
    typehandlers.add_type_alias(u'void ( * ) ( bool, bool ) *&', u'ns3::TracedValueCallback::Bool&')
    typehandlers.add_type_alias(u'void ( * ) ( double, double ) *', u'ns3::TracedValueCallback::Double')
    typehandlers.add_type_alias(u'void ( * ) ( double, double ) **', u'ns3::TracedValueCallback::Double*')
    typehandlers.add_type_alias(u'void ( * ) ( double, double ) *&', u'ns3::TracedValueCallback::Double&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::SequenceNumber32, ns3::SequenceNumber32 ) *', u'ns3::TracedValueCallback::SequenceNumber32')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::SequenceNumber32, ns3::SequenceNumber32 ) **', u'ns3::TracedValueCallback::SequenceNumber32*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::SequenceNumber32, ns3::SequenceNumber32 ) *&', u'ns3::TracedValueCallback::SequenceNumber32&')
    typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t ) *', u'ns3::TracedValueCallback::Uint8')
    typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t ) **', u'ns3::TracedValueCallback::Uint8*')
    typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t ) *&', u'ns3::TracedValueCallback::Uint8&')
    typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t ) *', u'ns3::TracedValueCallback::Uint32')
    typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t ) **', u'ns3::TracedValueCallback::Uint32*')
    typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t ) *&', u'ns3::TracedValueCallback::Uint32&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
    typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t ) *', u'ns3::TracedValueCallback::Int16')
    typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t ) **', u'ns3::TracedValueCallback::Int16*')
    typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t ) *&', u'ns3::TracedValueCallback::Int16&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::LrWpanPhyEnumeration, ns3::LrWpanPhyEnumeration ) *', u'ns3::TracedValueCallback::LrWpanPhyEnumeration')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::LrWpanPhyEnumeration, ns3::LrWpanPhyEnumeration ) **', u'ns3::TracedValueCallback::LrWpanPhyEnumeration*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::LrWpanPhyEnumeration, ns3::LrWpanPhyEnumeration ) *&', u'ns3::TracedValueCallback::LrWpanPhyEnumeration&')
    typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t ) *', u'ns3::TracedValueCallback::Int32')
    typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t ) **', u'ns3::TracedValueCallback::Int32*')
    typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t ) *&', u'ns3::TracedValueCallback::Int32&')
    typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t ) *', u'ns3::TracedValueCallback::Int8')
    typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t ) **', u'ns3::TracedValueCallback::Int8*')
    typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t ) *&', u'ns3::TracedValueCallback::Int8&')
    typehandlers.add_type_alias(u'void ( * ) ( ) *', u'ns3::TracedValueCallback::Void')
    typehandlers.add_type_alias(u'void ( * ) ( ) **', u'ns3::TracedValueCallback::Void*')
    typehandlers.add_type_alias(u'void ( * ) ( ) *&', u'ns3::TracedValueCallback::Void&')
    typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t ) *', u'ns3::TracedValueCallback::Uint16')
    typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t ) **', u'ns3::TracedValueCallback::Uint16*')
    typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t ) *&', u'ns3::TracedValueCallback::Uint16&')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::LrWpanMacState, ns3::LrWpanMacState ) *', u'ns3::TracedValueCallback::LrWpanMacState')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::LrWpanMacState, ns3::LrWpanMacState ) **', u'ns3::TracedValueCallback::LrWpanMacState*')
    typehandlers.add_type_alias(u'void ( * ) ( ns3::LrWpanMacState, ns3::LrWpanMacState ) *&', u'ns3::TracedValueCallback::LrWpanMacState&')
def register_types_ns3_internal(module):
    """Register types for the ns3::internal namespace.

    Nothing from this namespace is exported to Python; the root module
    lookup is retained only so the hook has the same shape as the other
    generated namespace-registration functions.
    """
    # Unused on purpose -- no types are registered for this namespace.
    top_level = module.get_root()
def register_methods(root_module):
    """Register the methods of every type previously added to root_module.

    Dispatches to one ``register_Ns3*_methods`` helper per wrapped class,
    looking each class up by its fully qualified C++ name.  The order
    follows the generated type-registration order (value types first,
    then Object/SimpleRefCount hierarchies, then attribute checkers and
    values), so base classes have their methods registered before
    subclasses.  All referenced helpers are defined later in this file.
    """
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3LrWpanEdPower_methods(root_module, root_module['ns3::LrWpanEdPower'])
    register_Ns3LrWpanPhyDataAndSymbolRates_methods(root_module, root_module['ns3::LrWpanPhyDataAndSymbolRates'])
    register_Ns3LrWpanPhyPibAttributes_methods(root_module, root_module['ns3::LrWpanPhyPibAttributes'])
    register_Ns3LrWpanPhyPpduHeaderSymbolNumber_methods(root_module, root_module['ns3::LrWpanPhyPpduHeaderSymbolNumber'])
    register_Ns3LrWpanSpectrumValueHelper_methods(root_module, root_module['ns3::LrWpanSpectrumValueHelper'])
    register_Ns3Mac16Address_methods(root_module, root_module['ns3::Mac16Address'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3Mac64Address_methods(root_module, root_module['ns3::Mac64Address'])
    register_Ns3McpsDataConfirmParams_methods(root_module, root_module['ns3::McpsDataConfirmParams'])
    register_Ns3McpsDataIndicationParams_methods(root_module, root_module['ns3::McpsDataIndicationParams'])
    register_Ns3McpsDataRequestParams_methods(root_module, root_module['ns3::McpsDataRequestParams'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    register_Ns3SequenceNumber8_methods(root_module, root_module['ns3::SequenceNumber8'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TracedValue__Ns3LrWpanMacState_methods(root_module, root_module['ns3::TracedValue< ns3::LrWpanMacState >'])
    register_Ns3TracedValue__Ns3LrWpanPhyEnumeration_methods(root_module, root_module['ns3::TracedValue< ns3::LrWpanPhyEnumeration >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3LrWpanHelper_methods(root_module, root_module['ns3::LrWpanHelper'])
    register_Ns3LrWpanLqiTag_methods(root_module, root_module['ns3::LrWpanLqiTag'])
    register_Ns3LrWpanMacHeader_methods(root_module, root_module['ns3::LrWpanMacHeader'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3LrWpanInterferenceHelper_Ns3Empty_Ns3DefaultDeleter__lt__ns3LrWpanInterferenceHelper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3SpectrumSignalParameters_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumSignalParameters__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumSignalParameters> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3SpectrumPhy_methods(root_module, root_module['ns3::SpectrumPhy'])
    register_Ns3SpectrumSignalParameters_methods(root_module, root_module['ns3::SpectrumSignalParameters'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LrWpanCsmaCa_methods(root_module, root_module['ns3::LrWpanCsmaCa'])
    register_Ns3LrWpanErrorModel_methods(root_module, root_module['ns3::LrWpanErrorModel'])
    register_Ns3LrWpanInterferenceHelper_methods(root_module, root_module['ns3::LrWpanInterferenceHelper'])
    register_Ns3LrWpanMac_methods(root_module, root_module['ns3::LrWpanMac'])
    register_Ns3LrWpanMacTrailer_methods(root_module, root_module['ns3::LrWpanMacTrailer'])
    register_Ns3LrWpanPhy_methods(root_module, root_module['ns3::LrWpanPhy'])
    register_Ns3LrWpanSpectrumSignalParameters_methods(root_module, root_module['ns3::LrWpanSpectrumSignalParameters'])
    register_Ns3Mac16AddressChecker_methods(root_module, root_module['ns3::Mac16AddressChecker'])
    register_Ns3Mac16AddressValue_methods(root_module, root_module['ns3::Mac16AddressValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3Mac64AddressChecker_methods(root_module, root_module['ns3::Mac64AddressChecker'])
    register_Ns3Mac64AddressValue_methods(root_module, root_module['ns3::Mac64AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3LrWpanNetDevice_methods(root_module, root_module['ns3::LrWpanNetDevice'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Register operators, constructors and methods of ns3::Address.

    Wraps the polymorphic address container used throughout ns-3:
    comparison and stream-output operators, the three C++ constructors,
    the raw-buffer copy helpers, and the Tag(De)Serialization interface.
    """
    # Comparison / printing operators exposed to Python.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## address.h (module 'network'): ns3::Address::Address() [constructor]
    cls.add_constructor([])
    ## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
    cls.add_constructor([param('ns3::Address const &', 'address')])
    ## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
    cls.add_method('CheckCompatible',
                   'bool',
                   [param('uint8_t', 'type'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyAllFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
    cls.add_method('CopyAllTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
    cls.add_method('CopyFrom',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    ## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
    cls.add_method('CopyTo',
                   'uint32_t',
                   [param('uint8_t *', 'buffer')],
                   is_const=True)
    ## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')])
    ## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
    cls.add_method('GetLength',
                   'uint8_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
    cls.add_method('IsInvalid',
                   'bool',
                   [],
                   is_const=True)
    ## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
    cls.add_method('IsMatchingType',
                   'bool',
                   [param('uint8_t', 'type')],
                   is_const=True)
    ## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
    cls.add_method('Register',
                   'uint8_t',
                   [],
                   is_static=True)
    ## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'buffer')],
                   is_const=True)
    return
def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    """Bind ns3::AsciiTraceHelper (trace-helper.h, module 'network')."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<OutputStreamWrapper> CreateFileStream(std::string filename, std::_Ios_Openmode filemode = std::ios_base::out)
    cls.add_method('CreateFileStream', 'ns3::Ptr< ns3::OutputStreamWrapper >', [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    # Static default trace sinks: dequeue/drop/enqueue/receive, each with and
    # without a context string. Registration order is preserved.
    cls.add_method('DefaultDequeueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    cls.add_method('DefaultDequeueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    cls.add_method('DefaultDropSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    cls.add_method('DefaultDropSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    cls.add_method('DefaultEnqueueSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    cls.add_method('DefaultEnqueueSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    cls.add_method('DefaultReceiveSinkWithContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    cls.add_method('DefaultReceiveSinkWithoutContext', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')], is_static=True)
    # Trace-file name helpers.
    cls.add_method('GetFilenameFromDevice', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    cls.add_method('GetFilenameFromInterfacePair', 'std::string', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return
def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    """Bind ns3::AsciiTraceHelperForDevice (trace-helper.h, module 'network')."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    cls.add_constructor([])
    # EnableAscii overloads. NOTE: pybindgen resolves overloads in registration
    # order, so the sequence below must stay exactly as generated.
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    cls.add_method('EnableAscii', 'void', [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    cls.add_method('EnableAscii', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    # EnableAsciiAll: prefix-based and stream-based variants.
    cls.add_method('EnableAsciiAll', 'void', [param('std::string', 'prefix')])
    cls.add_method('EnableAsciiAll', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    # Pure-virtual hook implemented by concrete device helpers.
    cls.add_method('EnableAsciiInternal', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')], is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList (attribute-construction-list.h, module 'core')."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    cls.add_constructor([])
    # void Add(std::string name, Ptr<AttributeChecker const> checker, Ptr<AttributeValue> value)
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    # Const iteration over stored items.
    cls.add_method('Begin', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    cls.add_method('End', 'std::_List_const_iterator< ns3::AttributeConstructionList::Item >', [], is_const=True)
    # Ptr<AttributeValue> Find(Ptr<AttributeChecker const> checker) const
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList::Item (attribute-construction-list.h, module 'core')."""
    # Constructors: default and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    # Mutable public data members of the Item record.
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Bind ns3::Buffer (buffer.h, module 'network')."""
    # Constructors: default, sized, sized+initialize flag, copy.
    cls.add_constructor([])
    cls.add_constructor([param('uint32_t', 'dataSize')])
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    # Grow the buffer at either end (byte count or another buffer's contents).
    cls.add_method('AddAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('AddAtEnd', 'void', [param('ns3::Buffer const &', 'o')])
    cls.add_method('AddAtStart', 'void', [param('uint32_t', 'start')])
    # Iterator access.
    cls.add_method('Begin', 'ns3::Buffer::Iterator', [], is_const=True)
    # Copy-out overloads: to an output stream, or to a raw byte array.
    cls.add_method('CopyData', 'void', [param('std::ostream *', 'os'), param('uint32_t', 'size')], is_const=True)
    cls.add_method('CopyData', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')], is_const=True)
    # Buffer CreateFragment(uint32_t start, uint32_t length) const
    cls.add_method('CreateFragment', 'ns3::Buffer', [param('uint32_t', 'start'), param('uint32_t', 'length')], is_const=True)
    # uint32_t Deserialize(uint8_t const * buffer, uint32_t size)
    cls.add_method('Deserialize', 'uint32_t', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('End', 'ns3::Buffer::Iterator', [], is_const=True)
    # Size queries.
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    # uint8_t const * PeekData() const
    cls.add_method('PeekData', 'uint8_t const *', [], is_const=True)
    # Shrink the buffer at either end.
    cls.add_method('RemoveAtEnd', 'void', [param('uint32_t', 'end')])
    cls.add_method('RemoveAtStart', 'void', [param('uint32_t', 'start')])
    # uint32_t Serialize(uint8_t * buffer, uint32_t maxSize) const
    cls.add_method('Serialize', 'uint32_t', [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')], is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Bind ns3::Buffer::Iterator (buffer.h, module 'network')."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    cls.add_constructor([])
    # Internet checksum helpers (overload order preserved for pybindgen dispatch).
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size')])
    cls.add_method('CalculateIpChecksum', 'uint16_t', [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    # Position / size queries.
    cls.add_method('GetDistanceFrom', 'uint32_t', [param('ns3::Buffer::Iterator const &', 'o')], is_const=True)
    cls.add_method('GetRemainingSize', 'uint32_t', [], is_const=True)
    cls.add_method('GetSize', 'uint32_t', [], is_const=True)
    cls.add_method('IsEnd', 'bool', [], is_const=True)
    cls.add_method('IsStart', 'bool', [], is_const=True)
    # Cursor movement: one step or a delta, forward and backward.
    cls.add_method('Next', 'void', [])
    cls.add_method('Next', 'void', [param('uint32_t', 'delta')])
    cls.add_method('PeekU8', 'uint8_t', [])
    cls.add_method('Prev', 'void', [])
    cls.add_method('Prev', 'void', [param('uint32_t', 'delta')])
    # Bulk reads: into a raw array, or from another iterator position.
    cls.add_method('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('Read', 'void', [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    # Integer reads: little-endian (Lsbtoh), network order (Ntoh), host order.
    cls.add_method('ReadLsbtohU16', 'uint16_t', [])
    cls.add_method('ReadLsbtohU32', 'uint32_t', [])
    cls.add_method('ReadLsbtohU64', 'uint64_t', [])
    cls.add_method('ReadNtohU16', 'uint16_t', [])
    cls.add_method('ReadNtohU32', 'uint32_t', [])
    cls.add_method('ReadNtohU64', 'uint64_t', [])
    cls.add_method('ReadU16', 'uint16_t', [])
    cls.add_method('ReadU32', 'uint32_t', [])
    cls.add_method('ReadU64', 'uint64_t', [])
    cls.add_method('ReadU8', 'uint8_t', [])
    # Bulk writes: from a raw array, or an iterator range [start, end).
    cls.add_method('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('Write', 'void', [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    # Integer writes: little-endian (Htolsb), network order (Hton), host order.
    cls.add_method('WriteHtolsbU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteHtolsbU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteHtolsbU64', 'void', [param('uint64_t', 'data')])
    cls.add_method('WriteHtonU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteHtonU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteHtonU64', 'void', [param('uint64_t', 'data')])
    cls.add_method('WriteU16', 'void', [param('uint16_t', 'data')])
    cls.add_method('WriteU32', 'void', [param('uint32_t', 'data')])
    cls.add_method('WriteU64', 'void', [param('uint64_t', 'data')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data')])
    cls.add_method('WriteU8', 'void', [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Bind ns3::ByteTagIterator (packet.h, module 'network')."""
    # Copy constructor only.
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    # Java-style iteration: HasNext() then Next().
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::ByteTagIterator::Item', [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Bind ns3::ByteTagIterator::Item (packet.h, module 'network')."""
    # Copy constructor only.
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    # Byte range covered by the tag.
    cls.add_method('GetEnd', 'uint32_t', [], is_const=True)
    cls.add_method('GetStart', 'uint32_t', [], is_const=True)
    # void GetTag(ns3::Tag & tag) const — fills the caller-supplied tag object.
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Bind ns3::ByteTagList (byte-tag-list.h, module 'network')."""
    # Constructors: default and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    # Add overloads: allocate a tag slot returning its TagBuffer, or merge
    # another list (registration order preserved).
    cls.add_method('Add', 'ns3::TagBuffer', [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    cls.add_method('Add', 'void', [param('ns3::ByteTagList const &', 'o')])
    # Offset adjustments when bytes are appended/prepended or shifted.
    cls.add_method('AddAtEnd', 'void', [param('int32_t', 'appendOffset')])
    cls.add_method('AddAtStart', 'void', [param('int32_t', 'prependOffset')])
    cls.add_method('Adjust', 'void', [param('int32_t', 'adjustment')])
    # Iterator over tags intersecting [offsetStart, offsetEnd].
    cls.add_method('Begin', 'ns3::ByteTagList::Iterator', [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')], is_const=True)
    cls.add_method('RemoveAll', 'void', [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Bind ns3::ByteTagList::Iterator (byte-tag-list.h, module 'network')."""
    # Copy constructor only.
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    cls.add_method('GetOffsetStart', 'uint32_t', [], is_const=True)
    # Java-style iteration: HasNext() then Next().
    cls.add_method('HasNext', 'bool', [], is_const=True)
    cls.add_method('Next', 'ns3::ByteTagList::Iterator::Item', [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Bind ns3::ByteTagList::Iterator::Item (byte-tag-list.h, module 'network')."""
    # Constructors: copy, and from a TagBuffer.
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    # Mutable public data members of the Item record.
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Bind ns3::CallbackBase (callback.h, module 'core')."""
    # Public constructors: copy and default.
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    # Protected constructor taking the implementation pointer.
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    return
def register_Ns3EventId_methods(root_module, cls):
    """Bind ns3::EventId (event-id.h, module 'core')."""
    # Comparison operators exported to Python.
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and full (impl, timestamp, context, uid).
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    cls.add_method('Cancel', 'void', [])
    # Const accessors for the event's identifying fields and state.
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Bind ns3::Hasher (hash.h, module 'core')."""
    # Constructors: copy, default, and from a hash implementation pointer.
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    # 32- and 64-bit hash overloads: raw buffer+size, then std::string
    # (registration order preserved for pybindgen overload dispatch).
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')])
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')])
    cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')])
    # Hasher & clear() — resets state, returns self for chaining.
    cls.add_method('clear', 'ns3::Hasher &', [])
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Address (ipv4-address.h, module 'network')."""
    # Comparison and stream-output operators (same registration order as generated).
    for operator in ('<', '!='):
        cls.add_binary_comparison_operator(operator)
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from a raw 32-bit value, from a dotted string.
    for ctor_args in ([param('ns3::Ipv4Address const &', 'arg0')],
                      [],
                      [param('uint32_t', 'address')],
                      [param('char const *', 'address')]):
        cls.add_constructor(ctor_args)
    # (name, return type, parameters, extra keyword flags), in generated order.
    methods = (
        ('CombineMask', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], dict(is_const=True)),
        ('ConvertFrom', 'ns3::Ipv4Address', [param('ns3::Address const &', 'address')], dict(is_static=True)),
        ('Deserialize', 'ns3::Ipv4Address', [param('uint8_t const *', 'buf')], dict(is_static=True)),
        ('Get', 'uint32_t', [], dict(is_const=True)),
        ('GetAny', 'ns3::Ipv4Address', [], dict(is_static=True)),
        ('GetBroadcast', 'ns3::Ipv4Address', [], dict(is_static=True)),
        ('GetLoopback', 'ns3::Ipv4Address', [], dict(is_static=True)),
        ('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address', [param('ns3::Ipv4Mask const &', 'mask')], dict(is_const=True)),
        ('GetZero', 'ns3::Ipv4Address', [], dict(is_static=True)),
        ('IsAny', 'bool', [], dict(is_const=True)),
        ('IsBroadcast', 'bool', [], dict(is_const=True)),
        ('IsEqual', 'bool', [param('ns3::Ipv4Address const &', 'other')], dict(is_const=True)),
        ('IsLocalMulticast', 'bool', [], dict(is_const=True)),
        ('IsLocalhost', 'bool', [], dict(is_const=True)),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], dict(is_static=True)),
        ('IsMulticast', 'bool', [], dict(is_const=True)),
        ('IsSubnetDirectedBroadcast', 'bool', [param('ns3::Ipv4Mask const &', 'mask')], dict(is_const=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True)),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], dict(is_const=True)),
        ('Set', 'void', [param('uint32_t', 'address')], {}),
        ('Set', 'void', [param('char const *', 'address')], {}),
    )
    for method_name, return_type, method_args, flags in methods:
        cls.add_method(method_name, return_type, method_args, **flags)
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Mask (ipv4-address.h, module 'network')."""
    # Comparison and stream-output operators (same registration order as generated).
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from a raw 32-bit mask, from a string mask.
    for ctor_args in ([param('ns3::Ipv4Mask const &', 'arg0')],
                      [],
                      [param('uint32_t', 'mask')],
                      [param('char const *', 'mask')]):
        cls.add_constructor(ctor_args)
    # (name, return type, parameters, extra keyword flags), in generated order.
    methods = (
        ('Get', 'uint32_t', [], dict(is_const=True)),
        ('GetInverse', 'uint32_t', [], dict(is_const=True)),
        ('GetLoopback', 'ns3::Ipv4Mask', [], dict(is_static=True)),
        ('GetOnes', 'ns3::Ipv4Mask', [], dict(is_static=True)),
        ('GetPrefixLength', 'uint16_t', [], dict(is_const=True)),
        ('GetZero', 'ns3::Ipv4Mask', [], dict(is_static=True)),
        ('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], dict(is_const=True)),
        ('IsMatch', 'bool', [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')], dict(is_const=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True)),
        ('Set', 'void', [param('uint32_t', 'mask')], {}),
    )
    for method_name, return_type, method_args, flags in methods:
        cls.add_method(method_name, return_type, method_args, **flags)
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address (ipv6-address.h, module 'network')."""
    # Comparison and stream-output operators (same registration order as generated).
    for operator in ('<', '!='):
        cls.add_binary_comparison_operator(operator)
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, from string, from raw bytes, copy, and from pointer.
    for ctor_args in ([],
                      [param('char const *', 'address')],
                      [param('uint8_t *', 'address')],
                      [param('ns3::Ipv6Address const &', 'addr')],
                      [param('ns3::Ipv6Address const *', 'addr')]):
        cls.add_constructor(ctor_args)
    # (name, return type, parameters, extra keyword flags), in generated order.
    # NOTE: IsAllHostsMulticast carries deprecated=True in the upstream API scan.
    methods = (
        ('CombinePrefix', 'ns3::Ipv6Address', [param('ns3::Ipv6Prefix const &', 'prefix')], {}),
        ('ConvertFrom', 'ns3::Ipv6Address', [param('ns3::Address const &', 'address')], dict(is_static=True)),
        ('Deserialize', 'ns3::Ipv6Address', [param('uint8_t const *', 'buf')], dict(is_static=True)),
        ('GetAllHostsMulticast', 'ns3::Ipv6Address', [], dict(is_static=True)),
        ('GetAllNodesMulticast', 'ns3::Ipv6Address', [], dict(is_static=True)),
        ('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], dict(is_static=True)),
        ('GetAny', 'ns3::Ipv6Address', [], dict(is_static=True)),
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], dict(is_const=True)),
        ('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], dict(is_const=True)),
        ('GetLoopback', 'ns3::Ipv6Address', [], dict(is_static=True)),
        ('GetOnes', 'ns3::Ipv6Address', [], dict(is_static=True)),
        ('GetZero', 'ns3::Ipv6Address', [], dict(is_static=True)),
        ('IsAllHostsMulticast', 'bool', [], dict(deprecated=True, is_const=True)),
        ('IsAllNodesMulticast', 'bool', [], dict(is_const=True)),
        ('IsAllRoutersMulticast', 'bool', [], dict(is_const=True)),
        ('IsAny', 'bool', [], dict(is_const=True)),
        ('IsDocumentation', 'bool', [], dict(is_const=True)),
        ('IsEqual', 'bool', [param('ns3::Ipv6Address const &', 'other')], dict(is_const=True)),
        ('IsIpv4MappedAddress', 'bool', [], dict(is_const=True)),
        ('IsLinkLocal', 'bool', [], dict(is_const=True)),
        ('IsLinkLocalMulticast', 'bool', [], dict(is_const=True)),
        ('IsLocalhost', 'bool', [], dict(is_const=True)),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], dict(is_static=True)),
        ('IsMulticast', 'bool', [], dict(is_const=True)),
        ('IsSolicitedMulticast', 'bool', [], dict(is_const=True)),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], dict(is_static=True)),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], dict(is_static=True)),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')], dict(is_static=True)),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'mac')], dict(is_static=True)),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'mac')], dict(is_static=True)),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'mac')], dict(is_static=True)),
        ('MakeIpv4MappedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv4Address', 'addr')], dict(is_static=True)),
        ('MakeSolicitedAddress', 'ns3::Ipv6Address', [param('ns3::Ipv6Address', 'addr')], dict(is_static=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True)),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], dict(is_const=True)),
        ('Set', 'void', [param('char const *', 'address')], {}),
        ('Set', 'void', [param('uint8_t *', 'address')], {}),
    )
    for method_name, return_type, method_args, flags in methods:
        cls.add_method(method_name, return_type, method_args, **flags)
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Prefix (ipv6-address.h, module 'network')."""
    # Comparison and stream-output operators (same registration order as generated).
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, from raw bytes, from string, from prefix length,
    # copy, and from pointer.
    for ctor_args in ([],
                      [param('uint8_t *', 'prefix')],
                      [param('char const *', 'prefix')],
                      [param('uint8_t', 'prefix')],
                      [param('ns3::Ipv6Prefix const &', 'prefix')],
                      [param('ns3::Ipv6Prefix const *', 'prefix')]):
        cls.add_constructor(ctor_args)
    # (name, return type, parameters, extra keyword flags), in generated order.
    methods = (
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], dict(is_const=True)),
        ('GetLoopback', 'ns3::Ipv6Prefix', [], dict(is_static=True)),
        ('GetOnes', 'ns3::Ipv6Prefix', [], dict(is_static=True)),
        ('GetPrefixLength', 'uint8_t', [], dict(is_const=True)),
        ('GetZero', 'ns3::Ipv6Prefix', [], dict(is_static=True)),
        ('IsEqual', 'bool', [param('ns3::Ipv6Prefix const &', 'other')], dict(is_const=True)),
        ('IsMatch', 'bool', [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')], dict(is_const=True)),
        ('Print', 'void', [param('std::ostream &', 'os')], dict(is_const=True)),
    )
    for method_name, return_type, method_args, flags in methods:
        cls.add_method(method_name, return_type, method_args, **flags)
    return
def register_Ns3LrWpanEdPower_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanEdPower (lr-wpan-phy.h, module 'lr-wpan')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanEdPower const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attr_name, attr_type in (('averagePower', 'double'),
                                 ('lastUpdate', 'ns3::Time'),
                                 ('measurementLength', 'ns3::Time')):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def register_Ns3LrWpanPhyDataAndSymbolRates_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanPhyDataAndSymbolRates (lr-wpan-phy.h, module 'lr-wpan')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanPhyDataAndSymbolRates const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attr_name in ('bitRate', 'symbolRate'):
        cls.add_instance_attribute(attr_name, 'double', is_const=False)
    return
def register_Ns3LrWpanPhyPibAttributes_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanPhyPibAttributes (lr-wpan-phy.h, module 'lr-wpan')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanPhyPibAttributes const &', 'arg0')])
    # PHY PIB attributes exposed as mutable instance attributes, in generated order.
    pib_attributes = (
        ('phyCCAMode', 'uint8_t'),
        ('phyChannelsSupported', 'uint32_t [ 32 ]'),
        ('phyCurrentChannel', 'uint8_t'),
        ('phyCurrentPage', 'uint32_t'),
        ('phyMaxFrameDuration', 'uint32_t'),
        ('phySHRDuration', 'uint32_t'),
        ('phySymbolsPerOctet', 'double'),
        ('phyTransmitPower', 'uint8_t'),
    )
    for attr_name, attr_type in pib_attributes:
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def register_Ns3LrWpanPhyPpduHeaderSymbolNumber_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanPhyPpduHeaderSymbolNumber (lr-wpan-phy.h, module 'lr-wpan')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanPhyPpduHeaderSymbolNumber const &', 'arg0')])
    # Symbol counts for the PPDU header parts, exposed as mutable attributes.
    for attr_name in ('phr', 'shrPreamble', 'shrSfd'):
        cls.add_instance_attribute(attr_name, 'double', is_const=False)
    return
def register_Ns3LrWpanSpectrumValueHelper_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanSpectrumValueHelper (lr-wpan-spectrum-value-helper.h, module 'lr-wpan')."""
    # Copy and default constructors (copy first, matching the generated order).
    cls.add_constructor([param('ns3::LrWpanSpectrumValueHelper const &', 'arg0')])
    cls.add_constructor([])
    # PSD factory methods for a given channel.
    cls.add_method('CreateNoisePowerSpectralDensity',
                   'ns3::Ptr< ns3::SpectrumValue >',
                   [param('uint32_t', 'channel')])
    cls.add_method('CreateTxPowerSpectralDensity',
                   'ns3::Ptr< ns3::SpectrumValue >',
                   [param('double', 'txPower'), param('uint32_t', 'channel')])
    # Static helper: integrate average power of a PSD over one channel.
    cls.add_method('TotalAvgPower',
                   'double',
                   [param('ns3::Ptr< ns3::SpectrumValue const >', 'psd'), param('uint32_t', 'channel')],
                   is_static=True)
    return
def register_Ns3Mac16Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac16Address (mac16-address.h, module 'network')."""
    # Comparison and stream-output operators (same registration order as generated).
    for operator in ('<', '!='):
        cls.add_binary_comparison_operator(operator)
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and from string.
    for ctor_args in ([param('ns3::Mac16Address const &', 'arg0')],
                      [],
                      [param('char const *', 'str')]):
        cls.add_constructor(ctor_args)
    # (name, return type, parameters, extra keyword flags), in generated order.
    methods = (
        ('Allocate', 'ns3::Mac16Address', [], dict(is_static=True)),
        ('ConvertFrom', 'ns3::Mac16Address', [param('ns3::Address const &', 'address')], dict(is_static=True)),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], dict(is_const=True)),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], dict(is_static=True)),
    )
    for method_name, return_type, method_args, flags in methods:
        cls.add_method(method_name, return_type, method_args, **flags)
    return
def register_Ns3Mac48Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac48Address (mac48-address.h, module 'network')."""
    # Comparison and stream-output operators (same registration order as generated).
    for operator in ('<', '!='):
        cls.add_binary_comparison_operator(operator)
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and from string.
    for ctor_args in ([param('ns3::Mac48Address const &', 'arg0')],
                      [],
                      [param('char const *', 'str')]):
        cls.add_constructor(ctor_args)
    # (name, return type, parameters, extra keyword flags), in generated order.
    methods = (
        ('Allocate', 'ns3::Mac48Address', [], dict(is_static=True)),
        ('ConvertFrom', 'ns3::Mac48Address', [param('ns3::Address const &', 'address')], dict(is_static=True)),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], dict(is_const=True)),
        ('GetBroadcast', 'ns3::Mac48Address', [], dict(is_static=True)),
        ('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv4Address', 'address')], dict(is_static=True)),
        ('GetMulticast', 'ns3::Mac48Address', [param('ns3::Ipv6Address', 'address')], dict(is_static=True)),
        ('GetMulticast6Prefix', 'ns3::Mac48Address', [], dict(is_static=True)),
        ('GetMulticastPrefix', 'ns3::Mac48Address', [], dict(is_static=True)),
        ('IsBroadcast', 'bool', [], dict(is_const=True)),
        ('IsGroup', 'bool', [], dict(is_const=True)),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], dict(is_static=True)),
    )
    for method_name, return_type, method_args, flags in methods:
        cls.add_method(method_name, return_type, method_args, **flags)
    return
def register_Ns3Mac64Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac64Address (mac64-address.h, module 'network')."""
    # Comparison and stream-output operators (same registration order as generated).
    for operator in ('<', '!='):
        cls.add_binary_comparison_operator(operator)
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and from string.
    for ctor_args in ([param('ns3::Mac64Address const &', 'arg0')],
                      [],
                      [param('char const *', 'str')]):
        cls.add_constructor(ctor_args)
    # (name, return type, parameters, extra keyword flags), in generated order.
    methods = (
        ('Allocate', 'ns3::Mac64Address', [], dict(is_static=True)),
        ('ConvertFrom', 'ns3::Mac64Address', [param('ns3::Address const &', 'address')], dict(is_static=True)),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], dict(is_const=True)),
        ('IsMatchingType', 'bool', [param('ns3::Address const &', 'address')], dict(is_static=True)),
    )
    for method_name, return_type, method_args, flags in methods:
        cls.add_method(method_name, return_type, method_args, **flags)
    return
def register_Ns3McpsDataConfirmParams_methods(root_module, cls):
    """Register Python bindings for ns3::McpsDataConfirmParams (lr-wpan-mac.h, module 'lr-wpan')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::McpsDataConfirmParams const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attr_name, attr_type in (('m_msduHandle', 'uint8_t'),
                                 ('m_status', 'ns3::LrWpanMcpsDataConfirmStatus')):
        cls.add_instance_attribute(attr_name, attr_type, is_const=False)
    return
def register_Ns3McpsDataIndicationParams_methods(root_module, cls):
    """Register bindings for ns3::McpsDataIndicationParams (lr-wpan-mac.h):
    default and copy constructors plus its public data members."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::McpsDataIndicationParams const &', 'arg0')])
    # All public fields are plain mutable attributes; register them in the
    # same (alphabetical) order the binding scanner emitted.
    for attribute_name, cpp_type in (
            ('m_dsn', 'uint8_t'),
            ('m_dstAddr', 'ns3::Mac16Address'),
            ('m_dstAddrMode', 'uint8_t'),
            ('m_dstPanId', 'uint16_t'),
            ('m_mpduLinkQuality', 'uint8_t'),
            ('m_srcAddr', 'ns3::Mac16Address'),
            ('m_srcAddrMode', 'uint8_t'),
            ('m_srcPanId', 'uint16_t')):
        cls.add_instance_attribute(attribute_name, cpp_type, is_const=False)
    return
def register_Ns3McpsDataRequestParams_methods(root_module, cls):
    """Register bindings for ns3::McpsDataRequestParams (lr-wpan-mac.h):
    copy and default constructors plus its public data members."""
    # NOTE: the copy constructor is registered before the default one,
    # mirroring the order produced by the binding scanner.
    cls.add_constructor([param('ns3::McpsDataRequestParams const &', 'arg0')])
    cls.add_constructor([])
    # Register each public field as a writable instance attribute.
    for attribute_name, cpp_type in (
            ('m_dstAddr', 'ns3::Mac16Address'),
            ('m_dstAddrMode', 'ns3::LrWpanAddressMode'),
            ('m_dstPanId', 'uint16_t'),
            ('m_msduHandle', 'uint8_t'),
            ('m_srcAddrMode', 'ns3::LrWpanAddressMode'),
            ('m_txOptions', 'uint8_t')):
        cls.add_instance_attribute(attribute_name, cpp_type, is_const=False)
    return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
    """Register bindings for ns3::NetDeviceContainer (net-device-container.h):
    constructors, the Add() overloads, Begin()/End() iterators, and indexed
    access via Get()/GetN()."""
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
                   [],
                   is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
                   [],
                   is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Register bindings for ns3::NodeContainer (node-container.h): the many
    merging constructors, the Add() overloads, node creation via Create(),
    iteration, indexed access, and the static GetGlobal() accessor."""
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::Node >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal',
                   'ns3::NodeContainer',
                   [],
                   is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register bindings for ns3::ObjectBase (object-base.h): the attribute
    get/set API, trace connect/disconnect API, and the protected
    construction hooks of ns-3's object root class."""
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Register bindings for ns3::ObjectDeleter (object.h): constructors and
    the static Delete() reclamation hook."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void Delete(ns3::Object *object)
    cls.add_method('Delete',
                   'void',
                   [param('ns3::Object *', 'object')],
                   is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register bindings for ns3::ObjectFactory (object-factory.h): stream
    output operator, constructors, Create(), GetTypeId(), attribute Set(),
    and the SetTypeId() overloads."""
    # operator<< support so factories can be printed from Python.
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('std::string', 'tid')])
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata (packet-metadata.h):
    constructors, header/trailer add-remove API, fragment handling,
    serialization, and the static Enable()/EnableChecking() switches."""
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [param('ns3::Buffer', 'buffer')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::PacketMetadata',
                   [param('uint32_t', 'start'), param('uint32_t', 'end')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata::Item (packet-metadata.h):
    default and copy constructors plus its public data members."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    # Public, writable fields describing one metadata item.
    for attribute_name, cpp_type in (
            ('current', 'ns3::Buffer::Iterator'),
            ('currentSize', 'uint32_t'),
            ('currentTrimedFromEnd', 'uint32_t'),
            ('currentTrimedFromStart', 'uint32_t'),
            ('isFragment', 'bool'),
            ('tid', 'ns3::TypeId')):
        cls.add_instance_attribute(attribute_name, cpp_type, is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Register bindings for ns3::PacketMetadata::ItemIterator
    (packet-metadata.h): constructors and the HasNext()/Next() protocol."""
    # Copy constructor first, then the (metadata, buffer) constructor.
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'),
                         param('ns3::Buffer', 'buffer')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::PacketMetadata::Item Next()
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Register bindings for ns3::PacketTagIterator (packet.h): copy
    constructor plus the HasNext()/Next() iteration protocol."""
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::PacketTagIterator::Item Next()
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Register bindings for ns3::PacketTagIterator::Item (packet.h): copy
    constructor and the GetTag()/GetTypeId() accessors."""
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    # void GetTag(ns3::Tag &tag) const
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    # ns3::TypeId GetTypeId() const
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register bindings for ns3::PacketTagList (packet-tag-list.h):
    constructors and the tag add/peek/remove/replace API."""
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head',
                   'ns3::PacketTagList::TagData const *',
                   [],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Register bindings for ns3::PacketTagList::TagData (packet-tag-list.h):
    default and copy constructors plus its public data members."""
    # Default constructor, then the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    # Public, writable fields of the intrusive tag-data node.
    for attribute_name, cpp_type in (
            ('count', 'uint32_t'),
            ('data', 'uint8_t [ 1 ]'),
            ('next', 'ns3::PacketTagList::TagData *'),
            ('size', 'uint32_t'),
            ('tid', 'ns3::TypeId')):
        cls.add_instance_attribute(attribute_name, cpp_type, is_const=False)
    return
def register_Ns3PcapFile_methods(root_module, cls):
    """Register bindings for ns3::PcapFile (pcap-file.h): file lifecycle
    (Open/Init/Close/Clear), header-field getters, packet Read/Write
    overloads, the static Diff() comparator, and the SNAPLEN_DEFAULT /
    ZONE_DEFAULT constants."""
    ## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
    cls.add_constructor([])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
    cls.add_method('Close',
                   'void',
                   [])
    ## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t & packets, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
    cls.add_method('Diff',
                   'bool',
                   [param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t &', 'packets'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
                   is_static=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
    cls.add_method('Eof',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
    cls.add_method('Fail',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
    cls.add_method('GetMagic',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
    cls.add_method('GetSigFigs',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
    cls.add_method('GetSnapLen',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
    cls.add_method('GetSwapMode',
                   'bool',
                   [])
    ## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset',
                   'int32_t',
                   [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor',
                   'uint16_t',
                   [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor',
                   'uint16_t',
                   [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false, bool nanosecMode=false) [member function]
    cls.add_method('Init',
                   'void',
                   [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false'), param('bool', 'nanosecMode', default_value='false')])
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::IsNanoSecMode() [member function]
    cls.add_method('IsNanoSecMode',
                   'bool',
                   [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open',
                   'void',
                   [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
    cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
    ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
    cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
    return
def register_Ns3PcapHelper_methods(root_module, cls):
    """Register Python bindings for ns3::PcapHelper (trace-helper.h, module 'network')."""
    # Copy constructor first, then the default constructor (generated order kept).
    cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
    cls.add_constructor([])
    # CreateFile(filename, filemode, dataLinkType, snapLen=UINT_MAX, tzCorrection=0)
    create_file_args = [
        param('std::string', 'filename'),
        param('std::_Ios_Openmode', 'filemode'),
        param('ns3::PcapHelper::DataLinkType', 'dataLinkType'),
        param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'),
        param('int32_t', 'tzCorrection', default_value='0'),
    ]
    cls.add_method('CreateFile', 'ns3::Ptr< ns3::PcapFileWrapper >', create_file_args)
    # Filename helpers: both take a prefix and an optional useObjectNames flag.
    cls.add_method('GetFilenameFromDevice', 'std::string', [
        param('std::string', 'prefix'),
        param('ns3::Ptr< ns3::NetDevice >', 'device'),
        param('bool', 'useObjectNames', default_value='true'),
    ])
    cls.add_method('GetFilenameFromInterfacePair', 'std::string', [
        param('std::string', 'prefix'),
        param('ns3::Ptr< ns3::Object >', 'object'),
        param('uint32_t', 'interface'),
        param('bool', 'useObjectNames', default_value='true'),
    ])
def register_Ns3PcapHelperForDevice_methods(root_module, cls):
    """Register Python bindings for the ns3::PcapHelperForDevice mixin (trace-helper.h, module 'network').

    Exposes the EnablePcap* overload family (per device, device name, device
    container, node container, and node/device id pair) plus EnablePcapAll.
    EnablePcapInternal is registered as pure virtual, so concrete helper
    classes must override it.
    """
    ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice(ns3::PcapHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::PcapHelperForDevice::PcapHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous=false, bool explicitFilename=false) [member function]
    cls.add_method('EnablePcap', 
                   'void', 
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, std::string ndName, bool promiscuous=false, bool explicitFilename=false) [member function]
    cls.add_method('EnablePcap', 
                   'void', 
                   [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'promiscuous', default_value='false'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NetDeviceContainer d, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap', 
                   'void', 
                   [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, ns3::NodeContainer n, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap', 
                   'void', 
                   [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcap(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool promiscuous=false) [member function]
    cls.add_method('EnablePcap', 
                   'void', 
                   [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapAll(std::string prefix, bool promiscuous=false) [member function]
    cls.add_method('EnablePcapAll', 
                   'void', 
                   [param('std::string', 'prefix'), param('bool', 'promiscuous', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::PcapHelperForDevice::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
    cls.add_method('EnablePcapInternal', 
                   'void', 
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')], 
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3SequenceNumber8_methods(root_module, cls):
    """Register Python bindings for ns3::SequenceNumber8 (sequence-number.h, module 'network').

    Adds the full comparison-operator set plus +/- arithmetic (and in-place
    variants) against both another sequence number and a signed char offset,
    then the constructors and the GetValue accessor.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber8'], root_module['ns3::SequenceNumber8'], param('ns3::SequenceNumber< unsigned char, signed char > const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::SequenceNumber8'], root_module['ns3::SequenceNumber8'], param('signed char', u'right'))
    cls.add_inplace_numeric_operator('+=', param('signed char', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::SequenceNumber8'], root_module['ns3::SequenceNumber8'], param('signed char', u'right'))
    cls.add_inplace_numeric_operator('-=', param('signed char', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('>=')
    ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned char, signed char>::SequenceNumber() [constructor]
    cls.add_constructor([])
    ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned char, signed char>::SequenceNumber(unsigned char value) [constructor]
    cls.add_constructor([param('unsigned char', 'value')])
    ## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned char, signed char>::SequenceNumber(ns3::SequenceNumber<unsigned char, signed char> const & value) [copy constructor]
    cls.add_constructor([param('ns3::SequenceNumber< unsigned char, signed char > const &', 'value')])
    ## sequence-number.h (module 'network'): unsigned char ns3::SequenceNumber<unsigned char, signed char>::GetValue() const [member function]
    cls.add_method('GetValue', 
                   'unsigned char', 
                   [], 
                   is_const=True)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register Python bindings for ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> (simple-ref-count.h, module 'core')."""
    instantiation = 'ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'
    # Default constructor followed by the copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param(instantiation + ' const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
def register_Ns3Simulator_methods(root_module, cls):
    """Register Python bindings for the static ns3::Simulator API (simulator.h, module 'core')."""
    # The copy constructor is the only non-static member exposed.
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    # Every public Simulator entry point below is a static member function;
    # registration order matches the upstream generated bindings.
    static_api = [
        ('Cancel', 'void', [param('ns3::EventId const &', 'id')]),
        ('Destroy', 'void', []),
        ('GetContext', 'uint32_t', []),
        ('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')]),
        ('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', []),
        ('GetMaximumSimulationTime', 'ns3::Time', []),
        ('GetSystemId', 'uint32_t', []),
        ('IsExpired', 'bool', [param('ns3::EventId const &', 'id')]),
        ('IsFinished', 'bool', []),
        ('Now', 'ns3::Time', []),
        ('Remove', 'void', [param('ns3::EventId const &', 'id')]),
        ('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')]),
        ('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')]),
        ('Stop', 'void', []),  # Stop() and Stop(delay) are distinct overloads
        ('Stop', 'void', [param('ns3::Time const &', 'delay')]),
    ]
    for method_name, return_type, arg_specs in static_api:
        cls.add_method(method_name, return_type, arg_specs, is_static=True)
def register_Ns3Tag_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Tag base class (tag.h, module 'network').

    Deserialize, GetSerializedSize, Print and Serialize are pure virtual, so
    Python-side subclasses must implement them; GetTypeId is static.
    """
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 
                   'void', 
                   [param('ns3::TagBuffer', 'i')], 
                   is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 
                   'void', 
                   [param('std::ostream &', 'os')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 
                   'void', 
                   [param('ns3::TagBuffer', 'i')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register Python bindings for ns3::TagBuffer (tag-buffer.h, module 'network')."""
    # Constructors: copy constructor, then the raw (start, end) buffer constructor.
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    cls.add_method('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')])
    # Bulk read into a caller-supplied byte buffer.
    cls.add_method('Read', 'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    # Typed readers: each takes no arguments and returns the decoded value.
    for reader_name, c_type in (('ReadDouble', 'double'),
                                ('ReadU16', 'uint16_t'),
                                ('ReadU32', 'uint32_t'),
                                ('ReadU64', 'uint64_t'),
                                ('ReadU8', 'uint8_t')):
        cls.add_method(reader_name, c_type, [])
    cls.add_method('TrimAtEnd', 'void', [param('uint32_t', 'trim')])
    # Bulk write from a caller-supplied byte buffer.
    cls.add_method('Write', 'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    # Typed writers mirror the readers; parameter names follow the upstream header.
    for writer_name, c_type, arg_name in (('WriteDouble', 'double', 'v'),
                                          ('WriteU16', 'uint16_t', 'data'),
                                          ('WriteU32', 'uint32_t', 'data'),
                                          ('WriteU64', 'uint64_t', 'v'),
                                          ('WriteU8', 'uint8_t', 'v')):
        cls.add_method(writer_name, 'void', [param(c_type, arg_name)])
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register Python bindings for ns3::TimeWithUnit (nstime.h, module 'core')."""
    # operator<< support, so instances stringify through the C++ stream operator.
    cls.add_output_stream_operator()
    # Copy constructor, then the (time, unit) value constructor.
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const', 'time'),
                         param('ns3::Time::Unit const', 'unit')])
def register_Ns3TracedValue__Ns3LrWpanMacState_methods(root_module, cls):
    """Register Python bindings for ns3::TracedValue<ns3::LrWpanMacState> (traced-value.h, module 'core')."""
    # Constructors: default, copy, and wrap-an-LrWpanMacState-value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< ns3::LrWpanMacState > const &', 'o')])
    cls.add_constructor([param('ns3::LrWpanMacState const &', 'v')])
    # Trace-callback plumbing: each hook has a with-context-path and a
    # without-context variant (registration order matches the generated file).
    for hook_name in ('Connect', 'Disconnect'):
        cls.add_method(hook_name, 'void',
                       [param('ns3::CallbackBase const &', 'cb'),
                        param('std::string', 'path')])
        cls.add_method(hook_name + 'WithoutContext', 'void',
                       [param('ns3::CallbackBase const &', 'cb')])
    # Value accessors.
    cls.add_method('Get', 'ns3::LrWpanMacState', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::LrWpanMacState const &', 'v')])
def register_Ns3TracedValue__Ns3LrWpanPhyEnumeration_methods(root_module, cls):
    """Register Python bindings for ns3::TracedValue<ns3::LrWpanPhyEnumeration> (traced-value.h, module 'core')."""
    # Constructors: default, copy, and wrap-an-LrWpanPhyEnumeration-value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< ns3::LrWpanPhyEnumeration > const &', 'o')])
    cls.add_constructor([param('ns3::LrWpanPhyEnumeration const &', 'v')])
    # Trace-callback plumbing: each hook has a with-context-path and a
    # without-context variant (registration order matches the generated file).
    for hook_name in ('Connect', 'Disconnect'):
        cls.add_method(hook_name, 'void',
                       [param('ns3::CallbackBase const &', 'cb'),
                        param('std::string', 'path')])
        cls.add_method(hook_name + 'WithoutContext', 'void',
                       [param('ns3::CallbackBase const &', 'cb')])
    # Value accessors.
    cls.add_method('Get', 'ns3::LrWpanPhyEnumeration', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::LrWpanPhyEnumeration const &', 'v')])
def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId (type-id.h, module 'core').

    Covers comparison/stream operators, constructors, attribute and
    trace-source registration (one AddTraceSource overload is marked
    deprecated), the Get*/Lookup* introspection API, and the Set* builders.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], 
                   deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 
                   'ns3::TypeId::AttributeInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'i')], 
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize', 
                   'std::size_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 
                   'ns3::TypeId::TraceSourceInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 
                   'ns3::TypeId', 
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 
                   'bool', 
                   [param('ns3::TypeId', 'other')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 
                   'bool', 
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'hash')], 
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe', 
                   'bool', 
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], 
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 
                   'ns3::TypeId', 
                   [param('std::string', 'name')], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
    cls.add_method('LookupTraceSourceByName', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 
                   'bool', 
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 
                   'ns3::TypeId', 
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 
                   'ns3::TypeId', 
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize', 
                   'ns3::TypeId', 
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
    cls.add_method('SetUid', 
                   'void', 
                   [param('uint16_t', 'uid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register Python bindings for the ns3::TypeId::AttributeInformation struct (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Plain public data members, all exposed as mutable instance attributes
    # (registration order matches the generated bindings).
    fields = (
        ('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >'),
        ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
        ('flags', 'uint32_t'),
        ('help', 'std::string'),
        ('initialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
        ('name', 'std::string'),
        ('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
        ('supportLevel', 'ns3::TypeId::SupportLevel'),
        ('supportMsg', 'std::string'),
    )
    for field_name, cpp_type in fields:
        cls.add_instance_attribute(field_name, cpp_type, is_const=False)
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register Python bindings for the ns3::TypeId::TraceSourceInformation struct (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Plain public data members, all exposed as mutable instance attributes
    # (registration order matches the generated bindings).
    for field_name, cpp_type in (
            ('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >'),
            ('callback', 'std::string'),
            ('help', 'std::string'),
            ('name', 'std::string'),
            ('supportLevel', 'ns3::TypeId::SupportLevel'),
            ('supportMsg', 'std::string')):
        cls.add_instance_attribute(field_name, cpp_type, is_const=False)
def register_Ns3Empty_methods(root_module, cls):
    """Register Python bindings for ns3::empty (default and copy constructors only)."""
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register Python bindings for ns3::int64x64_t (64.64 fixed-point type).

    Wires up the arithmetic operators (* + - /), unary negation, in-place
    variants, the full comparison set, stream output, the numeric
    constructors, and the accessor/utility member functions
    (GetDouble/GetHigh/GetLow/Invert/MulByInvert).
    """
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
    cls.add_constructor([param('long double', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh',
                   'int64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow',
                   'uint64_t',
                   [],
                   is_const=True)
    ## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
    cls.add_method('Invert',
                   'ns3::int64x64_t',
                   [param('uint64_t', 'v')],
                   is_static=True)
    ## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert',
                   'void',
                   [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Chunk base class.

    Exposes the constructors, the static GetTypeId, and the pure-virtual
    Deserialize/Print hooks so Python subclasses can override them.
    """
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Header_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Header base class.

    Adds stream output, the constructors, the static GetTypeId, and the
    pure-virtual Deserialize/GetSerializedSize/Print/Serialize methods
    that concrete header classes implement.
    """
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3LrWpanHelper_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanHelper.

    Exposes channel setup (Get/SetChannel), device installation (Install,
    AssociateToPan, AddMobility), logging and stream assignment helpers,
    plus the private virtual pcap/ascii trace hooks inherited from the
    trace-helper bases.
    """
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::LrWpanHelper::LrWpanHelper() [constructor]
    cls.add_constructor([])
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::LrWpanHelper::LrWpanHelper(bool useMultiModelSpectrumChannel) [constructor]
    cls.add_constructor([param('bool', 'useMultiModelSpectrumChannel')])
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumChannel> ns3::LrWpanHelper::GetChannel() [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::SpectrumChannel >',
                   [])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::SetChannel(ns3::Ptr<ns3::SpectrumChannel> channel) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumChannel >', 'channel')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::SetChannel(std::string channelName) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('std::string', 'channelName')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::AddMobility(ns3::Ptr<ns3::LrWpanPhy> phy, ns3::Ptr<ns3::MobilityModel> m) [member function]
    cls.add_method('AddMobility',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanPhy >', 'phy'), param('ns3::Ptr< ns3::MobilityModel >', 'm')])
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::NetDeviceContainer ns3::LrWpanHelper::Install(ns3::NodeContainer c) [member function]
    cls.add_method('Install',
                   'ns3::NetDeviceContainer',
                   [param('ns3::NodeContainer', 'c')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::AssociateToPan(ns3::NetDeviceContainer c, uint16_t panId) [member function]
    cls.add_method('AssociateToPan',
                   'void',
                   [param('ns3::NetDeviceContainer', 'c'), param('uint16_t', 'panId')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::EnableLogComponents() [member function]
    cls.add_method('EnableLogComponents',
                   'void',
                   [])
    ## lr-wpan-helper.h (module 'lr-wpan'): static std::string ns3::LrWpanHelper::LrWpanPhyEnumerationPrinter(ns3::LrWpanPhyEnumeration e) [member function]
    cls.add_method('LrWpanPhyEnumerationPrinter',
                   'std::string',
                   [param('ns3::LrWpanPhyEnumeration', 'e')],
                   is_static=True)
    ## lr-wpan-helper.h (module 'lr-wpan'): static std::string ns3::LrWpanHelper::LrWpanMacStatePrinter(ns3::LrWpanMacState e) [member function]
    cls.add_method('LrWpanMacStatePrinter',
                   'std::string',
                   [param('ns3::LrWpanMacState', 'e')],
                   is_static=True)
    ## lr-wpan-helper.h (module 'lr-wpan'): int64_t ns3::LrWpanHelper::AssignStreams(ns3::NetDeviceContainer c, int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
    cls.add_method('EnablePcapInternal',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
                   visibility='private', is_virtual=True)
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3LrWpanLqiTag_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanLqiTag (link-quality packet tag).

    Adds constructors (default, copy, from-LQI-byte), the Tag
    serialization interface (Serialize/Deserialize/GetSerializedSize),
    type-id accessors, Print, and the Get/Set accessors for the LQI value.
    """
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag::LrWpanLqiTag(ns3::LrWpanLqiTag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanLqiTag const &', 'arg0')])
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag::LrWpanLqiTag() [constructor]
    cls.add_constructor([])
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag::LrWpanLqiTag(uint8_t lqi) [constructor]
    cls.add_constructor([param('uint8_t', 'lqi')])
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): uint8_t ns3::LrWpanLqiTag::Get() const [member function]
    cls.add_method('Get',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::TypeId ns3::LrWpanLqiTag::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): uint32_t ns3::LrWpanLqiTag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanLqiTag::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Set(uint8_t lqi) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint8_t', 'lqi')])
    return
def register_Ns3LrWpanMacHeader_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanMacHeader (802.15.4 MAC header).

    Adds constructors, the ns3::Header serialization interface
    (Serialize/Deserialize/GetSerializedSize/Print), type-id accessors,
    and the full set of field getters/setters and frame-type predicates
    (addressing, PAN ids, frame control, security fields, sequence number).
    """
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacHeader(ns3::LrWpanMacHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanMacHeader const &', 'arg0')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacHeader() [constructor]
    cls.add_constructor([])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacHeader(ns3::LrWpanMacHeader::LrWpanMacType wpanMacType, uint8_t seqNum) [constructor]
    cls.add_constructor([param('ns3::LrWpanMacHeader::LrWpanMacType', 'wpanMacType'), param('uint8_t', 'seqNum')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetDstAddrMode() const [member function]
    cls.add_method('GetDstAddrMode',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacHeader::GetDstPanId() const [member function]
    cls.add_method('GetDstPanId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac64Address ns3::LrWpanMacHeader::GetExtDstAddr() const [member function]
    cls.add_method('GetExtDstAddr',
                   'ns3::Mac64Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac64Address ns3::LrWpanMacHeader::GetExtSrcAddr() const [member function]
    cls.add_method('GetExtSrcAddr',
                   'ns3::Mac64Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacHeader::GetFrameControl() const [member function]
    cls.add_method('GetFrameControl',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetFrameVer() const [member function]
    cls.add_method('GetFrameVer',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::GetFrmCounter() const [member function]
    cls.add_method('GetFrmCounter',
                   'uint32_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetFrmCtrlRes() const [member function]
    cls.add_method('GetFrmCtrlRes',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::TypeId ns3::LrWpanMacHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetKeyIdIndex() const [member function]
    cls.add_method('GetKeyIdIndex',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetKeyIdMode() const [member function]
    cls.add_method('GetKeyIdMode',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::GetKeyIdSrc32() const [member function]
    cls.add_method('GetKeyIdSrc32',
                   'uint32_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint64_t ns3::LrWpanMacHeader::GetKeyIdSrc64() const [member function]
    cls.add_method('GetKeyIdSrc64',
                   'uint64_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSecControl() const [member function]
    cls.add_method('GetSecControl',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSecCtrlReserved() const [member function]
    cls.add_method('GetSecCtrlReserved',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSecLevel() const [member function]
    cls.add_method('GetSecLevel',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSeqNum() const [member function]
    cls.add_method('GetSeqNum',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac16Address ns3::LrWpanMacHeader::GetShortDstAddr() const [member function]
    cls.add_method('GetShortDstAddr',
                   'ns3::Mac16Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac16Address ns3::LrWpanMacHeader::GetShortSrcAddr() const [member function]
    cls.add_method('GetShortSrcAddr',
                   'ns3::Mac16Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSrcAddrMode() const [member function]
    cls.add_method('GetSrcAddrMode',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacHeader::GetSrcPanId() const [member function]
    cls.add_method('GetSrcPanId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacType ns3::LrWpanMacHeader::GetType() const [member function]
    cls.add_method('GetType',
                   'ns3::LrWpanMacHeader::LrWpanMacType',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanMacHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsAckReq() const [member function]
    cls.add_method('IsAckReq',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsAcknowledgment() const [member function]
    cls.add_method('IsAcknowledgment',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsBeacon() const [member function]
    cls.add_method('IsBeacon',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsCommand() const [member function]
    cls.add_method('IsCommand',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsData() const [member function]
    cls.add_method('IsData',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsFrmPend() const [member function]
    cls.add_method('IsFrmPend',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsPanIdComp() const [member function]
    cls.add_method('IsPanIdComp',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsSecEnable() const [member function]
    cls.add_method('IsSecEnable',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetAckReq() [member function]
    cls.add_method('SetAckReq',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetDstAddrFields(uint16_t panId, ns3::Mac16Address addr) [member function]
    cls.add_method('SetDstAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac16Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetDstAddrFields(uint16_t panId, ns3::Mac64Address addr) [member function]
    cls.add_method('SetDstAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac64Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetDstAddrMode(uint8_t addrMode) [member function]
    cls.add_method('SetDstAddrMode',
                   'void',
                   [param('uint8_t', 'addrMode')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrameControl(uint16_t frameControl) [member function]
    cls.add_method('SetFrameControl',
                   'void',
                   [param('uint16_t', 'frameControl')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrameVer(uint8_t ver) [member function]
    cls.add_method('SetFrameVer',
                   'void',
                   [param('uint8_t', 'ver')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrmCounter(uint32_t frmCntr) [member function]
    cls.add_method('SetFrmCounter',
                   'void',
                   [param('uint32_t', 'frmCntr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrmCtrlRes(uint8_t res) [member function]
    cls.add_method('SetFrmCtrlRes',
                   'void',
                   [param('uint8_t', 'res')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrmPend() [member function]
    cls.add_method('SetFrmPend',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyId(uint8_t keyIndex) [member function]
    cls.add_method('SetKeyId',
                   'void',
                   [param('uint8_t', 'keyIndex')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyId(uint32_t keySrc, uint8_t keyIndex) [member function]
    cls.add_method('SetKeyId',
                   'void',
                   [param('uint32_t', 'keySrc'), param('uint8_t', 'keyIndex')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyId(uint64_t keySrc, uint8_t keyIndex) [member function]
    cls.add_method('SetKeyId',
                   'void',
                   [param('uint64_t', 'keySrc'), param('uint8_t', 'keyIndex')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyIdMode(uint8_t keyIdMode) [member function]
    cls.add_method('SetKeyIdMode',
                   'void',
                   [param('uint8_t', 'keyIdMode')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetNoAckReq() [member function]
    cls.add_method('SetNoAckReq',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetNoFrmPend() [member function]
    cls.add_method('SetNoFrmPend',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetNoPanIdComp() [member function]
    cls.add_method('SetNoPanIdComp',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetPanIdComp() [member function]
    cls.add_method('SetPanIdComp',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecControl(uint8_t secLevel) [member function]
    cls.add_method('SetSecControl',
                   'void',
                   [param('uint8_t', 'secLevel')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecCtrlReserved(uint8_t res) [member function]
    cls.add_method('SetSecCtrlReserved',
                   'void',
                   [param('uint8_t', 'res')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecDisable() [member function]
    cls.add_method('SetSecDisable',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecEnable() [member function]
    cls.add_method('SetSecEnable',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecLevel(uint8_t secLevel) [member function]
    cls.add_method('SetSecLevel',
                   'void',
                   [param('uint8_t', 'secLevel')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSeqNum(uint8_t seqNum) [member function]
    cls.add_method('SetSeqNum',
                   'void',
                   [param('uint8_t', 'seqNum')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSrcAddrFields(uint16_t panId, ns3::Mac16Address addr) [member function]
    cls.add_method('SetSrcAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac16Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSrcAddrFields(uint16_t panId, ns3::Mac64Address addr) [member function]
    cls.add_method('SetSrcAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac64Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSrcAddrMode(uint8_t addrMode) [member function]
    cls.add_method('SetSrcAddrMode',
                   'void',
                   [param('uint8_t', 'addrMode')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetType(ns3::LrWpanMacHeader::LrWpanMacType wpanMacType) [member function]
    cls.add_method('SetType',
                   'void',
                   [param('ns3::LrWpanMacHeader::LrWpanMacType', 'wpanMacType')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register Python bindings for ns3::Object.

    Exposes the public lifecycle/aggregation API (AggregateObject,
    Dispose, Initialize, IsInitialized, GetAggregateIterator, type-id
    accessors) and the protected virtual hooks (DoDispose, DoInitialize,
    NotifyNewAggregate) plus the protected copy constructor.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize',
                   'void',
                   [])
    ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
    cls.add_method('IsInitialized',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register Python bindings for ns3::Object::AggregateIterator
    (constructors plus the HasNext/Next iteration pair).
    """
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Ptr< ns3::Object const >',
                   [])
    return
def register_Ns3PcapFileWrapper_methods(root_module, cls):
    """Register Python bindings for ns3::PcapFileWrapper.

    Covers file lifecycle (Open/Close/Clear/Init), state queries
    (Fail/Eof), the three Write overloads and Read, and the pcap
    header-field getters (magic, version, timezone, snap length, etc.).
    """
    ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
    cls.add_constructor([])
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
    cls.add_method('Fail',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
    cls.add_method('Eof',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open',
                   'void',
                   [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
    cls.add_method('Close',
                   'void',
                   [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
    cls.add_method('Init',
                   'void',
                   [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
    ## pcap-file-wrapper.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::PcapFileWrapper::Read(ns3::Time & t) [member function]
    cls.add_method('Read',
                   'ns3::Ptr< ns3::Packet >',
                   [param('ns3::Time &', 't')])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
    cls.add_method('GetMagic',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor',
                   'uint16_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor',
                   'uint16_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset',
                   'int32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
    cls.add_method('GetSigFigs',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
    cls.add_method('GetSnapLen',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType',
                   'uint32_t',
                   [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register bindings for the SimpleRefCount<AttributeAccessor, ...>
    template instantiation: constructors and the static Cleanup().
    """
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    ## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
    cls.add_method('Cleanup',
                   'void',
                   [],
                   is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::AttributeChecker> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::AttributeValue> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::CallbackImplBase> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::EventImpl> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::Hash::Implementation> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3LrWpanInterferenceHelper_Ns3Empty_Ns3DefaultDeleter__lt__ns3LrWpanInterferenceHelper__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::LrWpanInterferenceHelper> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter< ns3::LrWpanInterferenceHelper > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::NixVector> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::OutputStreamWrapper> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::Packet> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3SpectrumSignalParameters_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumSignalParameters__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::SpectrumSignalParameters> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter< ns3::SpectrumSignalParameters > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<ns3::TraceSourceAccessor> base-class bindings on *cls*."""
    refcounted = 'ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > >'
    # default constructor
    cls.add_constructor([])
    # copy constructor
    cls.add_constructor([param(refcounted + ' const &', 'o')])
    # static void Cleanup() [member function]
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SpectrumPhy_methods(root_module, cls):
    """Register ns3::SpectrumPhy (module 'spectrum') bindings on *cls*.

    The class is an abstract interface: apart from the constructor and
    the static GetTypeId(), every registered method is pure virtual.
    """
    # SpectrumPhy() [constructor]
    cls.add_constructor([])
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void SetDevice(Ptr<NetDevice> d) [pure virtual]
    cls.add_method('SetDevice', 'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'd')],
                   is_pure_virtual=True, is_virtual=True)
    # Ptr<NetDevice> GetDevice() const [pure virtual]
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # void SetMobility(Ptr<MobilityModel> m) [pure virtual]
    cls.add_method('SetMobility', 'void',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'm')],
                   is_pure_virtual=True, is_virtual=True)
    # Ptr<MobilityModel> GetMobility() [pure virtual]
    cls.add_method('GetMobility', 'ns3::Ptr< ns3::MobilityModel >', [],
                   is_pure_virtual=True, is_virtual=True)
    # void SetChannel(Ptr<SpectrumChannel> c) [pure virtual]
    cls.add_method('SetChannel', 'void',
                   [param('ns3::Ptr< ns3::SpectrumChannel >', 'c')],
                   is_pure_virtual=True, is_virtual=True)
    # Ptr<SpectrumModel const> GetRxSpectrumModel() const [pure virtual]
    cls.add_method('GetRxSpectrumModel', 'ns3::Ptr< ns3::SpectrumModel const >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Ptr<AntennaModel> GetRxAntenna() [pure virtual]
    cls.add_method('GetRxAntenna', 'ns3::Ptr< ns3::AntennaModel >', [],
                   is_pure_virtual=True, is_virtual=True)
    # void StartRx(Ptr<SpectrumSignalParameters> params) [pure virtual]
    cls.add_method('StartRx', 'void',
                   [param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3SpectrumSignalParameters_methods(root_module, cls):
    """Register ns3::SpectrumSignalParameters (module 'spectrum') bindings on *cls*."""
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SpectrumSignalParameters const &', 'p')])
    # virtual Ptr<SpectrumSignalParameters> Copy()
    cls.add_method('Copy', 'ns3::Ptr< ns3::SpectrumSignalParameters >', [],
                   is_virtual=True)
    # public data members exposed as mutable instance attributes
    cls.add_instance_attribute('duration', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('psd', 'ns3::Ptr< ns3::SpectrumValue >', is_const=False)
    cls.add_instance_attribute('txAntenna', 'ns3::Ptr< ns3::AntennaModel >', is_const=False)
    cls.add_instance_attribute('txPhy', 'ns3::Ptr< ns3::SpectrumPhy >', is_const=False)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register Python bindings for ns3::Time (module 'core').

    Auto-generated pybindgen registrations: arithmetic and comparison
    operators, all numeric/string constructors, and the accessor,
    conversion and predicate member functions of nstime.h.
    """
    # arithmetic operators (Time * int64, Time +/- Time, Time / int64)
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    # comparison, in-place and stream operators
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
    cls.add_method('As',
                   'ns3::TimeWithUnit',
                   [param('ns3::Time::Unit const', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit',
                   'bool',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register ns3::TraceSourceAccessor (module 'core') bindings on *cls*."""
    # copy constructor, then default constructor
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    # The four (dis)connect variants are all pure virtual const members
    # returning bool; only the "...WithContext" pair takes a context string.
    for method_name, takes_context in (('Connect', True),
                                       ('ConnectWithoutContext', False),
                                       ('Disconnect', True),
                                       ('DisconnectWithoutContext', False)):
        arguments = [param('ns3::ObjectBase *', 'obj', transfer_ownership=False)]
        if takes_context:
            arguments.append(param('std::string', 'context'))
        arguments.append(param('ns3::CallbackBase const &', 'cb'))
        cls.add_method(method_name, 'bool', arguments,
                       is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Register ns3::Trailer (module 'network') bindings on *cls*.

    Trailer is an abstract serialization interface; all members except
    the static GetTypeId() are pure virtual.
    """
    cls.add_output_stream_operator()
    # default and copy constructors
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    # uint32_t Deserialize(Buffer::Iterator end) [pure virtual]
    cls.add_method('Deserialize', 'uint32_t',
                   [param('ns3::Buffer::Iterator', 'end')],
                   is_pure_virtual=True, is_virtual=True)
    # uint32_t GetSerializedSize() const [pure virtual]
    cls.add_method('GetSerializedSize', 'uint32_t', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # void Print(std::ostream & os) const [pure virtual]
    cls.add_method('Print', 'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # void Serialize(Buffer::Iterator start) const [pure virtual]
    cls.add_method('Serialize', 'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register ns3::AttributeAccessor (module 'core') bindings on *cls*."""
    # copy and default constructors
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # bool Get(ObjectBase const * object, AttributeValue & attribute) const [pure virtual]
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'),
                    param('ns3::AttributeValue &', 'attribute')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # introspection predicates [pure virtual]
    cls.add_method('HasGetter', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool Set(ObjectBase * object, AttributeValue const & value) const [pure virtual]
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False),
                    param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register ns3::AttributeChecker (module 'core') bindings on *cls*."""
    # copy and default constructors
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    # bool Check(AttributeValue const & value) const [pure virtual]
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool Copy(AttributeValue const & source, AttributeValue & destination) const [pure virtual]
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'),
                    param('ns3::AttributeValue &', 'destination')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Ptr<AttributeValue> Create() const [pure virtual]
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Ptr<AttributeValue> CreateValidValue(AttributeValue const & value) const
    # (the only concrete, non-virtual member here)
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    # type-information queries [pure virtual]
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register ns3::AttributeValue (module 'core') bindings on *cls*."""
    # copy and default constructors
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # Ptr<AttributeValue> Copy() const [pure virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string value, Ptr<AttributeChecker const> checker) [pure virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    # std::string SerializeToString(Ptr<AttributeChecker const> checker) const [pure virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3BooleanChecker_methods(root_module, cls):
    """Register ns3::BooleanChecker (module 'core') constructors on *cls*."""
    # default constructor, then copy constructor
    cls.add_constructor([])
    cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
    return
def register_Ns3BooleanValue_methods(root_module, cls):
    """Register bindings for ns3::BooleanValue (core boolean.h): a bool-backed AttributeValue."""
    # operator<< is wrapped so the value prints naturally.
    cls.add_output_stream_operator()
    # Constructors: copy, default, and from a bool.
    cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('bool', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped bool.
    cls.add_method('Get', 'bool', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('bool', 'value')])
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register bindings for ns3::CallbackChecker (core callback.h): constructors only."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register bindings for ns3::CallbackImplBase (core callback.h), the type-erased callback base."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # Pure-virtual identity / comparison interface.
    cls.add_method('GetTypeid', 'std::string', [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    # Protected static helper for demangling C++ type names.
    cls.add_method('Demangle', 'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register bindings for ns3::CallbackValue (core callback.h): an AttributeValue holding a CallbackBase."""
    # Constructors: copy, default, and from a CallbackBase.
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # Set takes the CallbackBase by value (as declared in the header).
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3DoubleValue_methods(root_module, cls):
    """Register bindings for ns3::DoubleValue (core double.h): a double-backed AttributeValue."""
    # Constructors: default, copy, and from a double.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
    cls.add_constructor([param('double const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped double.
    cls.add_method('Get', 'double', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('double const &', 'value')])
    return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeAccessor (core attribute.h), the do-nothing accessor."""
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # AttributeAccessor virtual interface.
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeChecker (core attribute.h), the do-nothing checker."""
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    # AttributeChecker virtual interface.
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeValue (core attribute.h), the stateless placeholder value."""
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # The AttributeValue overrides are declared private in the C++ header.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, visibility='private', is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EnumChecker_methods(root_module, cls):
    """Register bindings for ns3::EnumChecker (core enum.h): maps enum values to symbolic names."""
    cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
    cls.add_constructor([])
    # Table population; AddDefault also marks the default entry.
    cls.add_method('Add', 'void',
                   [param('int', 'value'), param('std::string', 'name')])
    cls.add_method('AddDefault', 'void',
                   [param('int', 'value'), param('std::string', 'name')])
    # AttributeChecker virtual interface.
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
                   is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EnumValue_methods(root_module, cls):
    """Register bindings for ns3::EnumValue (core enum.h): an int-backed AttributeValue for enums."""
    # Constructors: copy, default, and from an int.
    cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('int', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped int.
    cls.add_method('Get', 'int', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('int', 'value')])
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register bindings for ns3::EventImpl (core event-impl.h), the scheduled-event base class."""
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    # Public event-control surface.
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    cls.add_method('IsCancelled', 'bool', [])
    # Notify is the protected pure-virtual hook subclasses implement.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3IntegerValue_methods(root_module, cls):
    """Register bindings for ns3::IntegerValue (core integer.h): an int64_t-backed AttributeValue."""
    # Constructors: default, copy, and from an int64_t.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
    cls.add_constructor([param('int64_t const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped int64_t.
    cls.add_method('Get', 'int64_t', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('int64_t const &', 'value')])
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressChecker (network ipv4-address.h): constructors only."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressValue (network ipv4-address.h)."""
    # Constructors: default, copy, and from an Ipv4Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped address.
    cls.add_method('Get', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MaskChecker (network ipv4-address.h): constructors only."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MaskValue (network ipv4-address.h)."""
    # Constructors: default, copy, and from an Ipv4Mask.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped mask.
    cls.add_method('Get', 'ns3::Ipv4Mask', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv6AddressChecker (network ipv6-address.h): constructors only."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv6AddressValue (network ipv6-address.h)."""
    # Constructors: default, copy, and from an Ipv6Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped address.
    cls.add_method('Get', 'ns3::Ipv6Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Register bindings for ns3::Ipv6PrefixChecker (network ipv6-address.h): constructors only."""
    # Default constructor, then copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv6PrefixValue (network ipv6-address.h)."""
    # Constructors: default, copy, and from an Ipv6Prefix.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # Accessors for the wrapped prefix.
    cls.add_method('Get', 'ns3::Ipv6Prefix', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3LrWpanCsmaCa_methods(root_module, cls):
    """Register bindings for ns3::LrWpanCsmaCa (lr-wpan-csmaca.h), the 802.15.4 CSMA/CA algorithm."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_constructor([])
    # MAC association.
    cls.add_method('SetMac', 'void', [param('ns3::Ptr< ns3::LrWpanMac >', 'mac')])
    cls.add_method('GetMac', 'ns3::Ptr< ns3::LrWpanMac >', [], is_const=True)
    # Slotted / unslotted mode selection and queries.
    cls.add_method('SetSlottedCsmaCa', 'void', [])
    cls.add_method('SetUnSlottedCsmaCa', 'void', [])
    cls.add_method('IsSlottedCsmaCa', 'bool', [], is_const=True)
    cls.add_method('IsUnSlottedCsmaCa', 'bool', [], is_const=True)
    # Backoff-exponent and retry parameters.
    cls.add_method('SetMacMinBE', 'void', [param('uint8_t', 'macMinBE')])
    cls.add_method('GetMacMinBE', 'uint8_t', [], is_const=True)
    cls.add_method('SetMacMaxBE', 'void', [param('uint8_t', 'macMaxBE')])
    cls.add_method('GetMacMaxBE', 'uint8_t', [], is_const=True)
    cls.add_method('SetMacMaxCSMABackoffs', 'void', [param('uint8_t', 'macMaxCSMABackoffs')])
    cls.add_method('GetMacMaxCSMABackoffs', 'uint8_t', [], is_const=True)
    cls.add_method('SetUnitBackoffPeriod', 'void', [param('uint64_t', 'unitBackoffPeriod')])
    cls.add_method('GetUnitBackoffPeriod', 'uint64_t', [], is_const=True)
    cls.add_method('GetTimeToNextSlot', 'ns3::Time', [], is_const=True)
    # Entry points that drive the CSMA/CA state machine.
    cls.add_method('Start', 'void', [])
    cls.add_method('Cancel', 'void', [])
    cls.add_method('RandomBackoffDelay', 'void', [])
    cls.add_method('CanProceed', 'void', [])
    cls.add_method('RequestCCA', 'void', [])
    cls.add_method('PlmeCcaConfirm', 'void', [param('ns3::LrWpanPhyEnumeration', 'status')])
    cls.add_method('SetLrWpanMacStateCallback', 'void', [param('ns3::LrWpanMacStateCallback', 'macState')])
    # Random-stream assignment and state query.
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    cls.add_method('GetNB', 'uint8_t', [])
    # Private virtual teardown hook.
    cls.add_method('DoDispose', 'void', [], visibility='private', is_virtual=True)
    return
def register_Ns3LrWpanErrorModel_methods(root_module, cls):
    """Register bindings for ns3::LrWpanErrorModel (lr-wpan-error-model.h)."""
    cls.add_constructor([param('ns3::LrWpanErrorModel const &', 'arg0')])
    cls.add_constructor([])
    # Success probability of a chunk at a given SNR and bit count.
    cls.add_method('GetChunkSuccessRate', 'double',
                   [param('double', 'snr'), param('uint32_t', 'nbits')],
                   is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    return
def register_Ns3LrWpanInterferenceHelper_methods(root_module, cls):
    """Register ns3::LrWpanInterferenceHelper methods on the pybindgen wrapper ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::LrWpanInterferenceHelper::LrWpanInterferenceHelper(ns3::Ptr<ns3::SpectrumModel const> spectrumModel) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::SpectrumModel const >', 'spectrumModel')])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): bool ns3::LrWpanInterferenceHelper::AddSignal(ns3::Ptr<ns3::SpectrumValue const> signal) [member function]
    cls.add_method('AddSignal', 
                   'bool', 
                   [param('ns3::Ptr< ns3::SpectrumValue const >', 'signal')])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): bool ns3::LrWpanInterferenceHelper::RemoveSignal(ns3::Ptr<ns3::SpectrumValue const> signal) [member function]
    cls.add_method('RemoveSignal', 
                   'bool', 
                   [param('ns3::Ptr< ns3::SpectrumValue const >', 'signal')])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): void ns3::LrWpanInterferenceHelper::ClearSignals() [member function]
    cls.add_method('ClearSignals', 
                   'void', 
                   [])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumValue> ns3::LrWpanInterferenceHelper::GetSignalPsd() const [member function]
    cls.add_method('GetSignalPsd', 
                   'ns3::Ptr< ns3::SpectrumValue >', 
                   [], 
                   is_const=True)
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumModel const> ns3::LrWpanInterferenceHelper::GetSpectrumModel() const [member function]
    cls.add_method('GetSpectrumModel', 
                   'ns3::Ptr< ns3::SpectrumModel const >', 
                   [], 
                   is_const=True)
    return
def register_Ns3LrWpanMac_methods(root_module, cls):
    """Register ns3::LrWpanMac constructors, methods and attributes on ``cls``.

    Covers the IEEE 802.15.4 MAC wrapper: MCPS data primitives, PLME confirm
    callbacks, address/PAN configuration and the public m_* MAC PIB fields.
    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::LrWpanMac(ns3::LrWpanMac const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanMac const &', 'arg0')])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::LrWpanMac() [constructor]
    cls.add_constructor([])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAssociationStatus ns3::LrWpanMac::GetAssociationStatus() const [member function]
    cls.add_method('GetAssociationStatus', 
                   'ns3::LrWpanAssociationStatus', 
                   [], 
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::Mac64Address ns3::LrWpanMac::GetExtendedAddress() const [member function]
    cls.add_method('GetExtendedAddress', 
                   'ns3::Mac64Address', 
                   [], 
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): uint64_t ns3::LrWpanMac::GetMacAckWaitDuration() const [member function]
    cls.add_method('GetMacAckWaitDuration', 
                   'uint64_t', 
                   [], 
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): uint8_t ns3::LrWpanMac::GetMacMaxFrameRetries() const [member function]
    cls.add_method('GetMacMaxFrameRetries', 
                   'uint8_t', 
                   [], 
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): uint16_t ns3::LrWpanMac::GetPanId() const [member function]
    cls.add_method('GetPanId', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanPhy> ns3::LrWpanMac::GetPhy() [member function]
    cls.add_method('GetPhy', 
                   'ns3::Ptr< ns3::LrWpanPhy >', 
                   [])
    ## lr-wpan-mac.h (module 'lr-wpan'): bool ns3::LrWpanMac::GetRxOnWhenIdle() [member function]
    cls.add_method('GetRxOnWhenIdle', 
                   'bool', 
                   [])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::Mac16Address ns3::LrWpanMac::GetShortAddress() const [member function]
    cls.add_method('GetShortAddress', 
                   'ns3::Mac16Address', 
                   [], 
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanMac::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::McpsDataRequest(ns3::McpsDataRequestParams params, ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('McpsDataRequest', 
                   'void', 
                   [param('ns3::McpsDataRequestParams', 'params'), param('ns3::Ptr< ns3::Packet >', 'p')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PdDataConfirm(ns3::LrWpanPhyEnumeration status) [member function]
    cls.add_method('PdDataConfirm', 
                   'void', 
                   [param('ns3::LrWpanPhyEnumeration', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PdDataIndication(uint32_t psduLength, ns3::Ptr<ns3::Packet> p, uint8_t lqi) [member function]
    cls.add_method('PdDataIndication', 
                   'void', 
                   [param('uint32_t', 'psduLength'), param('ns3::Ptr< ns3::Packet >', 'p'), param('uint8_t', 'lqi')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeCcaConfirm(ns3::LrWpanPhyEnumeration status) [member function]
    cls.add_method('PlmeCcaConfirm', 
                   'void', 
                   [param('ns3::LrWpanPhyEnumeration', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeEdConfirm(ns3::LrWpanPhyEnumeration status, uint8_t energyLevel) [member function]
    cls.add_method('PlmeEdConfirm', 
                   'void', 
                   [param('ns3::LrWpanPhyEnumeration', 'status'), param('uint8_t', 'energyLevel')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeGetAttributeConfirm(ns3::LrWpanPhyEnumeration status, ns3::LrWpanPibAttributeIdentifier id, ns3::LrWpanPhyPibAttributes * attribute) [member function]
    cls.add_method('PlmeGetAttributeConfirm', 
                   'void', 
                   [param('ns3::LrWpanPhyEnumeration', 'status'), param('ns3::LrWpanPibAttributeIdentifier', 'id'), param('ns3::LrWpanPhyPibAttributes *', 'attribute')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeSetAttributeConfirm(ns3::LrWpanPhyEnumeration status, ns3::LrWpanPibAttributeIdentifier id) [member function]
    cls.add_method('PlmeSetAttributeConfirm', 
                   'void', 
                   [param('ns3::LrWpanPhyEnumeration', 'status'), param('ns3::LrWpanPibAttributeIdentifier', 'id')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeSetTRXStateConfirm(ns3::LrWpanPhyEnumeration status) [member function]
    cls.add_method('PlmeSetTRXStateConfirm', 
                   'void', 
                   [param('ns3::LrWpanPhyEnumeration', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetAssociationStatus(ns3::LrWpanAssociationStatus status) [member function]
    cls.add_method('SetAssociationStatus', 
                   'void', 
                   [param('ns3::LrWpanAssociationStatus', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetCsmaCa(ns3::Ptr<ns3::LrWpanCsmaCa> csmaCa) [member function]
    cls.add_method('SetCsmaCa', 
                   'void', 
                   [param('ns3::Ptr< ns3::LrWpanCsmaCa >', 'csmaCa')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetExtendedAddress(ns3::Mac64Address address) [member function]
    cls.add_method('SetExtendedAddress', 
                   'void', 
                   [param('ns3::Mac64Address', 'address')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetLrWpanMacState(ns3::LrWpanMacState macState) [member function]
    cls.add_method('SetLrWpanMacState', 
                   'void', 
                   [param('ns3::LrWpanMacState', 'macState')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetMacMaxFrameRetries(uint8_t retries) [member function]
    cls.add_method('SetMacMaxFrameRetries', 
                   'void', 
                   [param('uint8_t', 'retries')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetMcpsDataConfirmCallback(ns3::McpsDataConfirmCallback c) [member function]
    cls.add_method('SetMcpsDataConfirmCallback', 
                   'void', 
                   [param('ns3::McpsDataConfirmCallback', 'c')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetMcpsDataIndicationCallback(ns3::McpsDataIndicationCallback c) [member function]
    cls.add_method('SetMcpsDataIndicationCallback', 
                   'void', 
                   [param('ns3::McpsDataIndicationCallback', 'c')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetPanId(uint16_t panId) [member function]
    cls.add_method('SetPanId', 
                   'void', 
                   [param('uint16_t', 'panId')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetPhy(ns3::Ptr<ns3::LrWpanPhy> phy) [member function]
    cls.add_method('SetPhy', 
                   'void', 
                   [param('ns3::Ptr< ns3::LrWpanPhy >', 'phy')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetRxOnWhenIdle(bool rxOnWhenIdle) [member function]
    cls.add_method('SetRxOnWhenIdle', 
                   'void', 
                   [param('bool', 'rxOnWhenIdle')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetShortAddress(ns3::Mac16Address address) [member function]
    cls.add_method('SetShortAddress', 
                   'void', 
                   [param('ns3::Mac16Address', 'address')])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::aMinMPDUOverhead [variable]
    cls.add_static_attribute('aMinMPDUOverhead', 'uint32_t const', is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_aBaseSlotDuration [variable]
    cls.add_instance_attribute('m_aBaseSlotDuration', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_aBaseSuperframeDuration [variable]
    cls.add_instance_attribute('m_aBaseSuperframeDuration', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_aNumSuperframeSlots [variable]
    cls.add_instance_attribute('m_aNumSuperframeSlots', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macBeaconOrder [variable]
    cls.add_instance_attribute('m_macBeaconOrder', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macBeaconTxTime [variable]
    cls.add_instance_attribute('m_macBeaconTxTime', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macDsn [variable]
    cls.add_instance_attribute('m_macDsn', 'ns3::SequenceNumber8', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macMaxFrameRetries [variable]
    cls.add_instance_attribute('m_macMaxFrameRetries', 'uint8_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macPanId [variable]
    cls.add_instance_attribute('m_macPanId', 'uint16_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macPromiscuousMode [variable]
    cls.add_instance_attribute('m_macPromiscuousMode', 'bool', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macRxOnWhenIdle [variable]
    cls.add_instance_attribute('m_macRxOnWhenIdle', 'bool', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macSuperframeOrder [variable]
    cls.add_instance_attribute('m_macSuperframeOrder', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macSyncSymbolOffset [variable]
    cls.add_instance_attribute('m_macSyncSymbolOffset', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::DoDispose() [member function]
    cls.add_method('DoDispose', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::DoInitialize() [member function]
    cls.add_method('DoInitialize', 
                   'void', 
                   [], 
                   visibility='protected', is_virtual=True)
    return
def register_Ns3LrWpanMacTrailer_methods(root_module, cls):
    """Register ns3::LrWpanMacTrailer (FCS trailer) bindings on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer::LrWpanMacTrailer(ns3::LrWpanMacTrailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanMacTrailer const &', 'arg0')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer::LrWpanMacTrailer() [constructor]
    cls.add_constructor([])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): bool ns3::LrWpanMacTrailer::CheckFcs(ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('CheckFcs', 
                   'bool', 
                   [param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacTrailer::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize', 
                   'uint32_t', 
                   [param('ns3::Buffer::Iterator', 'start')], 
                   is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::EnableFcs(bool enable) [member function]
    cls.add_method('EnableFcs', 
                   'void', 
                   [param('bool', 'enable')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacTrailer::GetFcs() const [member function]
    cls.add_method('GetFcs', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::TypeId ns3::LrWpanMacTrailer::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacTrailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanMacTrailer::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): bool ns3::LrWpanMacTrailer::IsFcsEnabled() [member function]
    cls.add_method('IsFcsEnabled', 
                   'bool', 
                   [])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 
                   'void', 
                   [param('std::ostream &', 'os')], 
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize', 
                   'void', 
                   [param('ns3::Buffer::Iterator', 'start')], 
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::SetFcs(ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('SetFcs', 
                   'void', 
                   [param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer::LR_WPAN_MAC_FCS_LENGTH [variable]
    cls.add_static_attribute('LR_WPAN_MAC_FCS_LENGTH', 'uint16_t const', is_const=True)
    return
def register_Ns3LrWpanPhy_methods(root_module, cls):
    """Register ns3::LrWpanPhy bindings on ``cls``.

    Covers the 802.15.4 PHY: spectrum plumbing (mobility/channel/antenna/PSD),
    PD/PLME request primitives and their confirm-callback setters.
    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy::aMaxPhyPacketSize [variable]
    cls.add_static_attribute('aMaxPhyPacketSize', 'uint32_t const', is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy::aTurnaroundTime [variable]
    cls.add_static_attribute('aTurnaroundTime', 'uint32_t const', is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanPhy::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy::LrWpanPhy() [constructor]
    cls.add_constructor([])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetMobility(ns3::Ptr<ns3::MobilityModel> m) [member function]
    cls.add_method('SetMobility', 
                   'void', 
                   [param('ns3::Ptr< ns3::MobilityModel >', 'm')], 
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::MobilityModel> ns3::LrWpanPhy::GetMobility() [member function]
    cls.add_method('GetMobility', 
                   'ns3::Ptr< ns3::MobilityModel >', 
                   [], 
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetChannel(ns3::Ptr<ns3::SpectrumChannel> c) [member function]
    cls.add_method('SetChannel', 
                   'void', 
                   [param('ns3::Ptr< ns3::SpectrumChannel >', 'c')], 
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumChannel> ns3::LrWpanPhy::GetChannel() [member function]
    cls.add_method('GetChannel', 
                   'ns3::Ptr< ns3::SpectrumChannel >', 
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetDevice(ns3::Ptr<ns3::NetDevice> d) [member function]
    cls.add_method('SetDevice', 
                   'void', 
                   [param('ns3::Ptr< ns3::NetDevice >', 'd')], 
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::NetDevice> ns3::LrWpanPhy::GetDevice() const [member function]
    cls.add_method('GetDevice', 
                   'ns3::Ptr< ns3::NetDevice >', 
                   [], 
                   is_const=True, is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetAntenna(ns3::Ptr<ns3::AntennaModel> a) [member function]
    cls.add_method('SetAntenna', 
                   'void', 
                   [param('ns3::Ptr< ns3::AntennaModel >', 'a')])
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::AntennaModel> ns3::LrWpanPhy::GetRxAntenna() [member function]
    cls.add_method('GetRxAntenna', 
                   'ns3::Ptr< ns3::AntennaModel >', 
                   [], 
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumModel const> ns3::LrWpanPhy::GetRxSpectrumModel() const [member function]
    cls.add_method('GetRxSpectrumModel', 
                   'ns3::Ptr< ns3::SpectrumModel const >', 
                   [], 
                   is_const=True, is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetTxPowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue> txPsd) [member function]
    cls.add_method('SetTxPowerSpectralDensity', 
                   'void', 
                   [param('ns3::Ptr< ns3::SpectrumValue >', 'txPsd')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetNoisePowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue const> noisePsd) [member function]
    cls.add_method('SetNoisePowerSpectralDensity', 
                   'void', 
                   [param('ns3::Ptr< ns3::SpectrumValue const >', 'noisePsd')])
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumValue const> ns3::LrWpanPhy::GetNoisePowerSpectralDensity() [member function]
    cls.add_method('GetNoisePowerSpectralDensity', 
                   'ns3::Ptr< ns3::SpectrumValue const >', 
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::StartRx(ns3::Ptr<ns3::SpectrumSignalParameters> params) [member function]
    cls.add_method('StartRx', 
                   'void', 
                   [param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params')], 
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PdDataRequest(uint32_t const psduLength, ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('PdDataRequest', 
                   'void', 
                   [param('uint32_t const', 'psduLength'), param('ns3::Ptr< ns3::Packet >', 'p')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeCcaRequest() [member function]
    cls.add_method('PlmeCcaRequest', 
                   'void', 
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeEdRequest() [member function]
    cls.add_method('PlmeEdRequest', 
                   'void', 
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeGetAttributeRequest(ns3::LrWpanPibAttributeIdentifier id) [member function]
    cls.add_method('PlmeGetAttributeRequest', 
                   'void', 
                   [param('ns3::LrWpanPibAttributeIdentifier', 'id')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeSetTRXStateRequest(ns3::LrWpanPhyEnumeration state) [member function]
    cls.add_method('PlmeSetTRXStateRequest', 
                   'void', 
                   [param('ns3::LrWpanPhyEnumeration', 'state')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeSetAttributeRequest(ns3::LrWpanPibAttributeIdentifier id, ns3::LrWpanPhyPibAttributes * attribute) [member function]
    cls.add_method('PlmeSetAttributeRequest', 
                   'void', 
                   [param('ns3::LrWpanPibAttributeIdentifier', 'id'), param('ns3::LrWpanPhyPibAttributes *', 'attribute')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPdDataIndicationCallback(ns3::PdDataIndicationCallback c) [member function]
    cls.add_method('SetPdDataIndicationCallback', 
                   'void', 
                   [param('ns3::PdDataIndicationCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPdDataConfirmCallback(ns3::PdDataConfirmCallback c) [member function]
    cls.add_method('SetPdDataConfirmCallback', 
                   'void', 
                   [param('ns3::PdDataConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeCcaConfirmCallback(ns3::PlmeCcaConfirmCallback c) [member function]
    cls.add_method('SetPlmeCcaConfirmCallback', 
                   'void', 
                   [param('ns3::PlmeCcaConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeEdConfirmCallback(ns3::PlmeEdConfirmCallback c) [member function]
    cls.add_method('SetPlmeEdConfirmCallback', 
                   'void', 
                   [param('ns3::PlmeEdConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeGetAttributeConfirmCallback(ns3::PlmeGetAttributeConfirmCallback c) [member function]
    cls.add_method('SetPlmeGetAttributeConfirmCallback', 
                   'void', 
                   [param('ns3::PlmeGetAttributeConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeSetTRXStateConfirmCallback(ns3::PlmeSetTRXStateConfirmCallback c) [member function]
    cls.add_method('SetPlmeSetTRXStateConfirmCallback', 
                   'void', 
                   [param('ns3::PlmeSetTRXStateConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeSetAttributeConfirmCallback(ns3::PlmeSetAttributeConfirmCallback c) [member function]
    cls.add_method('SetPlmeSetAttributeConfirmCallback', 
                   'void', 
                   [param('ns3::PlmeSetAttributeConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): double ns3::LrWpanPhy::GetDataOrSymbolRate(bool isData) [member function]
    cls.add_method('GetDataOrSymbolRate', 
                   'double', 
                   [param('bool', 'isData')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetErrorModel(ns3::Ptr<ns3::LrWpanErrorModel> e) [member function]
    cls.add_method('SetErrorModel', 
                   'void', 
                   [param('ns3::Ptr< ns3::LrWpanErrorModel >', 'e')])
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanErrorModel> ns3::LrWpanPhy::GetErrorModel() const [member function]
    cls.add_method('GetErrorModel', 
                   'ns3::Ptr< ns3::LrWpanErrorModel >', 
                   [], 
                   is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): uint64_t ns3::LrWpanPhy::GetPhySHRDuration() const [member function]
    cls.add_method('GetPhySHRDuration', 
                   'uint64_t', 
                   [], 
                   is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): double ns3::LrWpanPhy::GetPhySymbolsPerOctet() const [member function]
    cls.add_method('GetPhySymbolsPerOctet', 
                   'double', 
                   [], 
                   is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): int64_t ns3::LrWpanPhy::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams', 
                   'int64_t', 
                   [param('int64_t', 'stream')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::DoDispose() [member function]
    cls.add_method('DoDispose', 
                   'void', 
                   [], 
                   visibility='private', is_virtual=True)
    return
def register_Ns3LrWpanSpectrumSignalParameters_methods(root_module, cls):
    """Register ns3::LrWpanSpectrumSignalParameters bindings on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## lr-wpan-spectrum-signal-parameters.h (module 'lr-wpan'): ns3::LrWpanSpectrumSignalParameters::LrWpanSpectrumSignalParameters() [constructor]
    cls.add_constructor([])
    ## lr-wpan-spectrum-signal-parameters.h (module 'lr-wpan'): ns3::LrWpanSpectrumSignalParameters::LrWpanSpectrumSignalParameters(ns3::LrWpanSpectrumSignalParameters const & p) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanSpectrumSignalParameters const &', 'p')])
    ## lr-wpan-spectrum-signal-parameters.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumSignalParameters> ns3::LrWpanSpectrumSignalParameters::Copy() [member function]
    cls.add_method('Copy', 
                   'ns3::Ptr< ns3::SpectrumSignalParameters >', 
                   [], 
                   is_virtual=True)
    ## lr-wpan-spectrum-signal-parameters.h (module 'lr-wpan'): ns3::LrWpanSpectrumSignalParameters::packetBurst [variable]
    cls.add_instance_attribute('packetBurst', 'ns3::Ptr< ns3::PacketBurst >', is_const=False)
    return
def register_Ns3Mac16AddressChecker_methods(root_module, cls):
    """Register ns3::Mac16AddressChecker constructors on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## mac16-address.h (module 'network'): ns3::Mac16AddressChecker::Mac16AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac16-address.h (module 'network'): ns3::Mac16AddressChecker::Mac16AddressChecker(ns3::Mac16AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac16AddressChecker const &', 'arg0')])
    return
def register_Ns3Mac16AddressValue_methods(root_module, cls):
    """Register ns3::Mac16AddressValue (AttributeValue wrapper) bindings on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## mac16-address.h (module 'network'): ns3::Mac16AddressValue::Mac16AddressValue() [constructor]
    cls.add_constructor([])
    ## mac16-address.h (module 'network'): ns3::Mac16AddressValue::Mac16AddressValue(ns3::Mac16AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac16AddressValue const &', 'arg0')])
    ## mac16-address.h (module 'network'): ns3::Mac16AddressValue::Mac16AddressValue(ns3::Mac16Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac16Address const &', 'value')])
    ## mac16-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac16AddressValue::Copy() const [member function]
    cls.add_method('Copy', 
                   'ns3::Ptr< ns3::AttributeValue >', 
                   [], 
                   is_const=True, is_virtual=True)
    ## mac16-address.h (module 'network'): bool ns3::Mac16AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 
                   'bool', 
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
                   is_virtual=True)
    ## mac16-address.h (module 'network'): ns3::Mac16Address ns3::Mac16AddressValue::Get() const [member function]
    cls.add_method('Get', 
                   'ns3::Mac16Address', 
                   [], 
                   is_const=True)
    ## mac16-address.h (module 'network'): std::string ns3::Mac16AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 
                   'std::string', 
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
                   is_const=True, is_virtual=True)
    ## mac16-address.h (module 'network'): void ns3::Mac16AddressValue::Set(ns3::Mac16Address const & value) [member function]
    cls.add_method('Set', 
                   'void', 
                   [param('ns3::Mac16Address const &', 'value')])
    return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register ns3::Mac48AddressChecker constructors on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
    return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register ns3::Mac48AddressValue (AttributeValue wrapper) bindings on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
    cls.add_constructor([])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    ## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    ## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
    cls.add_method('Copy', 
                   'ns3::Ptr< ns3::AttributeValue >', 
                   [], 
                   is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 
                   'bool', 
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
                   is_virtual=True)
    ## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
    cls.add_method('Get', 
                   'ns3::Mac48Address', 
                   [], 
                   is_const=True)
    ## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 
                   'std::string', 
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
                   is_const=True, is_virtual=True)
    ## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
    cls.add_method('Set', 
                   'void', 
                   [param('ns3::Mac48Address const &', 'value')])
    return
def register_Ns3Mac64AddressChecker_methods(root_module, cls):
    """Register ns3::Mac64AddressChecker constructors on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## mac64-address.h (module 'network'): ns3::Mac64AddressChecker::Mac64AddressChecker() [constructor]
    cls.add_constructor([])
    ## mac64-address.h (module 'network'): ns3::Mac64AddressChecker::Mac64AddressChecker(ns3::Mac64AddressChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac64AddressChecker const &', 'arg0')])
    return
def register_Ns3Mac64AddressValue_methods(root_module, cls):
    """Register ns3::Mac64AddressValue (AttributeValue wrapper) bindings on ``cls``.

    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## mac64-address.h (module 'network'): ns3::Mac64AddressValue::Mac64AddressValue() [constructor]
    cls.add_constructor([])
    ## mac64-address.h (module 'network'): ns3::Mac64AddressValue::Mac64AddressValue(ns3::Mac64AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Mac64AddressValue const &', 'arg0')])
    ## mac64-address.h (module 'network'): ns3::Mac64AddressValue::Mac64AddressValue(ns3::Mac64Address const & value) [constructor]
    cls.add_constructor([param('ns3::Mac64Address const &', 'value')])
    ## mac64-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac64AddressValue::Copy() const [member function]
    cls.add_method('Copy', 
                   'ns3::Ptr< ns3::AttributeValue >', 
                   [], 
                   is_const=True, is_virtual=True)
    ## mac64-address.h (module 'network'): bool ns3::Mac64AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString', 
                   'bool', 
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
                   is_virtual=True)
    ## mac64-address.h (module 'network'): ns3::Mac64Address ns3::Mac64AddressValue::Get() const [member function]
    cls.add_method('Get', 
                   'ns3::Mac64Address', 
                   [], 
                   is_const=True)
    ## mac64-address.h (module 'network'): std::string ns3::Mac64AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString', 
                   'std::string', 
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], 
                   is_const=True, is_virtual=True)
    ## mac64-address.h (module 'network'): void ns3::Mac64AddressValue::Set(ns3::Mac64Address const & value) [member function]
    cls.add_method('Set', 
                   'void', 
                   [param('ns3::Mac64Address const &', 'value')])
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register the abstract ns3::NetDevice interface bindings on ``cls``.

    Nearly all methods are registered pure-virtual, matching the C++
    abstract base class.
    NOTE(review): auto-generated binding registration (ns-3/pybindgen);
    avoid manual edits.  ``root_module`` is unused in this function.
    """
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback', 
                   'void', 
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress', 
                   'ns3::Address', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast', 
                   'ns3::Address', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel', 
                   'ns3::Ptr< ns3::Channel >', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex', 
                   'uint32_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu', 
                   'uint16_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast', 
                   'ns3::Address', 
                   [param('ns3::Ipv4Address', 'multicastGroup')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast', 
                   'ns3::Address', 
                   [param('ns3::Ipv6Address', 'addr')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode', 
                   'ns3::Ptr< ns3::Node >', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send', 
                   'bool', 
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom', 
                   'bool', 
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress', 
                   'void', 
                   [param('ns3::Address', 'address')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex', 
                   'void', 
                   [param('uint32_t const', 'index')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu', 
                   'bool', 
                   [param('uint16_t const', 'mtu')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode', 
                   'void', 
                   [param('ns3::Ptr< ns3::Node >', 'node')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback', 
                   'void', 
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback', 
                   'void', 
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')], 
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom', 
                   'bool', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register pybindgen bindings for ns3::NixVector (nix-vector.h, module 'network')."""
    cls.add_output_stream_operator()
    # Constructors: default and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    # (method name, return type, parameter list, extra add_method keyword options),
    # listed in the same order as the original registrations.
    method_table = [
        ('AddNeighborIndex', 'void',
         [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')], {}),
        ('BitCount', 'uint32_t',
         [param('uint32_t', 'numberOfNeighbors')], dict(is_const=True)),
        ('Copy', 'ns3::Ptr< ns3::NixVector >', [], dict(is_const=True)),
        ('Deserialize', 'uint32_t',
         [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')], {}),
        ('ExtractNeighborIndex', 'uint32_t',
         [param('uint32_t', 'numberOfBits')], {}),
        ('GetRemainingBits', 'uint32_t', [], {}),
        ('GetSerializedSize', 'uint32_t', [], dict(is_const=True)),
        ('Serialize', 'uint32_t',
         [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
         dict(is_const=True)),
    ]
    for method_name, return_type, parameters, options in method_table:
        cls.add_method(method_name, return_type, parameters, **options)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Node (node.h, module 'network').

    Exposes the Node constructors, device/application management methods,
    and the protocol-handler / device-addition listener registration API.
    """
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication',
                   'ns3::Ptr< ns3::Application >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
    cls.add_method('GetLocalTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register pybindgen bindings for ns3::ObjectFactoryChecker (object-factory.h, module 'core')."""
    # Only the default and copy constructors are exposed.
    for ctor_params in ([], [param('ns3::ObjectFactoryChecker const &', 'arg0')]):
        cls.add_constructor(ctor_params)
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::ObjectFactoryValue (object-factory.h, module 'core')."""
    # Constructors: default, copy, and from the wrapped ns3::ObjectFactory.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    # (method name, return type, parameter list, extra add_method keyword options)
    method_table = [
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'),
          param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'ns3::ObjectFactory', [], dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void', [param('ns3::ObjectFactory const &', 'value')], {}),
    ]
    for method_name, return_type, parameters, options in method_table:
        cls.add_method(method_name, return_type, parameters, **options)
    return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register pybindgen bindings for ns3::OutputStreamWrapper (output-stream-wrapper.h, module 'network')."""
    # Constructors: copy, (filename, open-mode), and wrap an existing std::ostream*.
    constructor_signatures = (
        [param('ns3::OutputStreamWrapper const &', 'arg0')],
        [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode')],
        [param('std::ostream *', 'os')],
    )
    for ctor_params in constructor_signatures:
        cls.add_constructor(ctor_params)
    # Accessor for the wrapped stream pointer.
    cls.add_method('GetStream', 'std::ostream *', [])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register pybindgen bindings for ns3::Packet (packet.h, module 'network').

    Exposes constructors, header/trailer and tag manipulation, fragment and
    copy operations, serialization, and printing helpers.
    """
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString',
                   'std::string',
                   [],
                   is_const=True)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::TimeValue (nstime.h, module 'core')."""
    # Constructors: default, copy, and from the wrapped ns3::Time.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # (method name, return type, parameter list, extra add_method keyword options)
    method_table = [
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'),
          param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'ns3::Time', [], dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void', [param('ns3::Time const &', 'value')], {}),
    ]
    for method_name, return_type, parameters, options in method_table:
        cls.add_method(method_name, return_type, parameters, **options)
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register pybindgen bindings for ns3::TypeIdChecker (type-id.h, module 'core')."""
    # Only the default and copy constructors are exposed.
    for ctor_params in ([], [param('ns3::TypeIdChecker const &', 'arg0')]):
        cls.add_constructor(ctor_params)
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::TypeIdValue (type-id.h, module 'core')."""
    # Constructors: default, copy, and from the wrapped ns3::TypeId.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # (method name, return type, parameter list, extra add_method keyword options)
    method_table = [
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'),
          param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'ns3::TypeId', [], dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void', [param('ns3::TypeId const &', 'value')], {}),
    ]
    for method_name, return_type, parameters, options in method_table:
        cls.add_method(method_name, return_type, parameters, **options)
    return
def register_Ns3UintegerValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::UintegerValue (uinteger.h, module 'core')."""
    # Constructors: default, copy, and from the wrapped uint64_t.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
    cls.add_constructor([param('uint64_t const &', 'value')])
    # (method name, return type, parameter list, extra add_method keyword options)
    method_table = [
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'),
          param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'uint64_t', [], dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void', [param('uint64_t const &', 'value')], {}),
    ]
    for method_name, return_type, parameters, options in method_table:
        cls.add_method(method_name, return_type, parameters, **options)
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register pybindgen bindings for ns3::AddressChecker (address.h, module 'network')."""
    # Only the default and copy constructors are exposed.
    for ctor_params in ([], [param('ns3::AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_params)
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register pybindgen bindings for ns3::AddressValue (address.h, module 'network')."""
    # Constructors: default, copy, and from the wrapped ns3::Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Address const &', 'value')])
    # (method name, return type, parameter list, extra add_method keyword options)
    method_table = [
        ('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
         dict(is_const=True, is_virtual=True)),
        ('DeserializeFromString', 'bool',
         [param('std::string', 'value'),
          param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_virtual=True)),
        ('Get', 'ns3::Address', [], dict(is_const=True)),
        ('SerializeToString', 'std::string',
         [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
         dict(is_const=True, is_virtual=True)),
        ('Set', 'void', [param('ns3::Address const &', 'value')], {}),
    ]
    for method_name, return_type, parameters, options in method_table:
        cls.add_method(method_name, return_type, parameters, **options)
    return
def register_Ns3LrWpanNetDevice_methods(root_module, cls):
    """Register pybindgen bindings for ns3::LrWpanNetDevice (lr-wpan-net-device.h, module 'lr-wpan').

    Covers the ns3::NetDevice virtual interface implementations plus the
    LR-WPAN-specific accessors (MAC, PHY, CSMA/CA, spectrum channel).
    """
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::LrWpanNetDevice::LrWpanNetDevice(ns3::LrWpanNetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanNetDevice const &', 'arg0')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::LrWpanNetDevice::LrWpanNetDevice() [constructor]
    cls.add_constructor([])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): int64_t ns3::LrWpanNetDevice::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::Channel> ns3::LrWpanNetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanCsmaCa> ns3::LrWpanNetDevice::GetCsmaCa() const [member function]
    cls.add_method('GetCsmaCa',
                   'ns3::Ptr< ns3::LrWpanCsmaCa >',
                   [],
                   is_const=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): uint32_t ns3::LrWpanNetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanMac> ns3::LrWpanNetDevice::GetMac() const [member function]
    cls.add_method('GetMac',
                   'ns3::Ptr< ns3::LrWpanMac >',
                   [],
                   is_const=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): uint16_t ns3::LrWpanNetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::Node> ns3::LrWpanNetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanPhy> ns3::LrWpanNetDevice::GetPhy() const [member function]
    cls.add_method('GetPhy',
                   'ns3::Ptr< ns3::LrWpanPhy >',
                   [],
                   is_const=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::McpsDataIndication(ns3::McpsDataIndicationParams params, ns3::Ptr<ns3::Packet> pkt) [member function]
    cls.add_method('McpsDataIndication',
                   'void',
                   [param('ns3::McpsDataIndicationParams', 'params'), param('ns3::Ptr< ns3::Packet >', 'pkt')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetChannel(ns3::Ptr<ns3::SpectrumChannel> channel) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumChannel >', 'channel')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetCsmaCa(ns3::Ptr<ns3::LrWpanCsmaCa> csmaca) [member function]
    cls.add_method('SetCsmaCa',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanCsmaCa >', 'csmaca')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetMac(ns3::Ptr<ns3::LrWpanMac> mac) [member function]
    cls.add_method('SetMac',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanMac >', 'mac')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetPhy(ns3::Ptr<ns3::LrWpanPhy> phy) [member function]
    cls.add_method('SetPhy',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanPhy >', 'phy')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register constructors and methods of the abstract base
    ns3::Hash::Implementation (hash-function.h, module 'core') on *cls*."""
    def _hash_args():
        # Build fresh (buffer, size) parameter objects for each registration.
        return [param('char const *', 'buffer'), param('size_t const', 'size')]
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    cls.add_constructor([])
    # GetHash32 must be supplied by subclasses (pure virtual).
    cls.add_method('GetHash32',
                   'uint32_t',
                   _hash_args(),
                   is_pure_virtual=True, is_virtual=True)
    # GetHash64 is virtual but not pure in the base class.
    cls.add_method('GetHash64',
                   'uint64_t',
                   _hash_args(),
                   is_virtual=True)
    # clear() is also pure virtual.
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register ns3::Hash::Function::Fnv1a (hash-fnv.h, module 'core') on *cls*."""
    def _hash_args():
        # Fresh (buffer, size) parameter objects per registration.
        return [param('char const *', 'buffer'), param('size_t const', 'size')]
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    cls.add_constructor([])
    # 32- and 64-bit hash entry points; both virtual overrides.
    cls.add_method('GetHash32', 'uint32_t', _hash_args(), is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', _hash_args(), is_virtual=True)
    # clear() is a virtual override as well.
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash32 (hash-function.h, module 'core') on *cls*."""
    # Copy constructor, plus a constructor wrapping a raw Hash32Function_ptr.
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    # Only the 32-bit hash entry point and clear() are registered here.
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash64 (hash-function.h, module 'core') on *cls*."""
    def _hash_args():
        # Fresh (buffer, size) parameter objects per registration.
        return [param('char const *', 'buffer'), param('size_t const', 'size')]
    # Copy constructor, plus a constructor wrapping a raw Hash64Function_ptr.
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    # Both hash widths are exposed, along with clear(); all virtual overrides.
    cls.add_method('GetHash32', 'uint32_t', _hash_args(), is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', _hash_args(), is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register ns3::Hash::Function::Murmur3 (hash-murmur3.h, module 'core') on *cls*."""
    def _hash_args():
        # Fresh (buffer, size) parameter objects per registration.
        return [param('char const *', 'buffer'), param('size_t const', 'size')]
    # Copy constructor and default constructor.
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    cls.add_constructor([])
    # 32- and 64-bit hash entry points plus clear(); all virtual overrides.
    cls.add_method('GetHash32', 'uint32_t', _hash_args(), is_virtual=True)
    cls.add_method('GetHash64', 'uint64_t', _hash_args(), is_virtual=True)
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_functions(root_module):
    """Dispatch free-function registration to each ns-3 submodule."""
    # (submodule name, registrar) pairs, invoked in the original order.
    registrars = (
        ('FatalImpl', register_functions_ns3_FatalImpl),
        ('Hash', register_functions_ns3_Hash),
        ('TracedValueCallback', register_functions_ns3_TracedValueCallback),
        ('internal', register_functions_ns3_internal),
    )
    for submodule_name, registrar in registrars:
        registrar(root_module.get_submodule(submodule_name), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to bind in the ns3::FatalImpl namespace."""
    return
def register_functions_ns3_Hash(module, root_module):
    """Recurse into the nested ns3::Hash::Function namespace; no free functions here."""
    function_submodule = module.get_submodule('Function')
    register_functions_ns3_Hash_Function(function_submodule, root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to bind in the ns3::Hash::Function namespace."""
    return
def register_functions_ns3_TracedValueCallback(module, root_module):
    """No free functions to bind in the ns3::TracedValueCallback namespace."""
    return
def register_functions_ns3_internal(module, root_module):
    """No free functions to bind in the ns3::internal namespace."""
    return
def main():
    """Build the binding module and emit the generated code to stdout."""
    sink = FileCodeSink(sys.stdout)
    root = module_init()
    # Registration phases must run in this order: types, then methods,
    # then free functions.
    for register_phase in (register_types, register_methods, register_functions):
        register_phase(root)
    root.generate(sink)
# Script entry point: generate the bindings when run directly.
if __name__ == '__main__':
    main()
| gpl-2.0 | 3,440,807,654,304,556,000 | 64.901358 | 448 | 0.616686 | false | 3.643625 | false | false | false | 0.014769 |