commit (string, 40 chars) | subject (string, 1-1.49k chars) | old_file (string, 4-311 chars) | new_file (string, 4-311 chars) | new_contents (string, 1-29.8k chars) | old_contents (string, 0-9.9k chars) | lang (3 classes) | proba (float64, 0-1)
---|---|---|---|---|---|---|---|
e0521c1f9a12819fd89f12aed01c623628dc4c4d | Build options added. | intexration/propertyhandler.py | intexration/propertyhandler.py | import configparser
import os
class Build:
def __init__(self, name, idx, bib):
self._name = name
self._idx = idx
self._bib = bib
def get_name(self):
return self._name
def get_idx(self):
return self._idx
def get_bib(self):
return self._bib
def get_tex(self):
return self._name + '.tex'
def get_pdf(self):
return self._name + '.pdf'
def get_log(self):
return self._name + '.log'
class PropertyHandler:
def __init__(self, path):
self._path = path
def get_builds(self):
builds = []
if os.path.exists(self._path):
parser = configparser.ConfigParser()
parser.read(self._path)
for build_name in parser.sections():
if parser.has_option(build_name, 'idx'):
idx = parser[build_name]['idx']
else:
idx = build_name + '.idx'
if parser.has_option(build_name, 'bib'):
bib = parser[build_name]['bib']
else:
bib = build_name
builds.append(Build(build_name, idx, bib))
return builds | import configparser
import os
class Build:
def __init__(self, name, idx, bib):
self._name = name
self._idx = idx
self._bib = bib
def get_name(self):
return self._name
def get_idx(self):
return self._idx
def get_bib(self):
return self._bib
def get_tex(self):
return self._name + '.tex'
def get_pdf(self):
return self._name + '.pdf'
def get_log(self):
return self._name + '.log'
class PropertyHandler:
def __init__(self, path):
self._path = path
def get_builds(self):
builds = []
if os.path.exists(self._path):
parser = configparser.ConfigParser()
parser.read(self._path)
for build_name in parser.sections():
idx = build_name + '.idx'
bib = build_name
builds.append(Build(build_name, idx, bib))
return builds
| Python | 0 |
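The commit above makes the `idx` and `bib` build options optional, with per-section overrides in the properties file. A minimal sketch of how such a file might parse (the section name and values are hypothetical):

```python
import configparser

# Hypothetical IntexRation properties file: 'idx' overrides the default
# '<section>.idx', while the omitted 'bib' falls back to the section name.
SAMPLE = """
[paper]
idx = custom.idx
"""

parser = configparser.ConfigParser()
parser.read_string(SAMPLE)
for name in parser.sections():
    idx = parser[name]['idx'] if parser.has_option(name, 'idx') else name + '.idx'
    bib = parser[name]['bib'] if parser.has_option(name, 'bib') else name
    print(name, idx, bib)  # -> paper custom.idx paper
```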
e6885fd2260dc9399f5ea2f835cbf65294d18a8d | make compatible with new postgres version 8.3 | addons/report_analytic_line/report_analytic_line.py | addons/report_analytic_line/report_analytic_line.py | ##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: sale.py 1005 2005-07-25 08:41:42Z nicoe $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import fields,osv
class report_account_analytic_line_to_invoice(osv.osv):
_name = "report.account.analytic.line.to.invoice"
_description = "Analytic lines to invoice report"
_auto = False
_columns = {
'name': fields.date('Month', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
'product_uom_id':fields.many2one('product.uom', 'UoM', readonly=True),
'unit_amount': fields.float('Units', readonly=True),
'sale_price': fields.float('Sale price', readonly=True),
'amount': fields.float('Amount', readonly=True),
}
_order = 'name desc, product_id asc, account_id asc'
def init(self, cr):
cr.execute("""
CREATE OR REPLACE VIEW report_account_analytic_line_to_invoice AS (
SELECT
DISTINCT(to_char(l.date,'YYYY-MM-DD')) AS name,
MIN(l.id) AS id,
l.product_id,
l.account_id,
SUM(l.amount) AS amount,
SUM(l.unit_amount*t.list_price) AS sale_price,
SUM(l.unit_amount) AS unit_amount,
l.product_uom_id
FROM
account_analytic_line l
left join
product_product p on (l.product_id=p.id)
left join
product_template t on (p.product_tmpl_id=t.id)
WHERE
(invoice_id IS NULL) and (to_invoice IS NOT NULL)
GROUP BY
to_char(date,'YYYY-MM-DD'), product_id, product_uom_id, account_id
)
""")
report_account_analytic_line_to_invoice()
| ##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: sale.py 1005 2005-07-25 08:41:42Z nicoe $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import fields,osv
class report_account_analytic_line_to_invoice(osv.osv):
_name = "report.account.analytic.line.to.invoice"
_description = "Analytic lines to invoice report"
_auto = False
_columns = {
'name': fields.date('Month', readonly=True),
'product_id':fields.many2one('product.product', 'Product', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
'product_uom_id':fields.many2one('product.uom', 'UoM', readonly=True),
'unit_amount': fields.float('Units', readonly=True),
'sale_price': fields.float('Sale price', readonly=True),
'amount': fields.float('Amount', readonly=True),
}
_order = 'name desc, product_id asc, account_id asc'
def init(self, cr):
cr.execute("""
CREATE OR REPLACE VIEW report_account_analytic_line_to_invoice AS (
SELECT
DISTINCT(SUBSTRING(l.date for 7))||'-'||'01' AS name,
MIN(l.id) AS id,
l.product_id,
l.account_id,
SUM(l.amount) AS amount,
SUM(l.unit_amount*t.list_price) AS sale_price,
SUM(l.unit_amount) AS unit_amount,
l.product_uom_id
FROM
account_analytic_line l
left join
product_product p on (l.product_id=p.id)
left join
product_template t on (p.product_tmpl_id=t.id)
WHERE
(invoice_id IS NULL) and (to_invoice IS NOT NULL)
GROUP BY
SUBSTRING(date for 7), product_id, product_uom_id, account_id
)
""")
report_account_analytic_line_to_invoice()
| Python | 0 |
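The incompatibility this commit fixes stems from PostgreSQL 8.3 removing most implicit casts to text, so string functions can no longer be applied directly to a date/timestamp column. A sketch of the before/after query fragments (table and column taken from the view above):

```python
# Fails on PostgreSQL >= 8.3: SUBSTRING expects text, and the implicit
# date-to-text cast it relied on was removed in 8.3.
BROKEN = "SELECT SUBSTRING(l.date for 7) FROM account_analytic_line l"

# The replacement formats the date explicitly, so no implicit cast is needed.
FIXED = "SELECT to_char(l.date, 'YYYY-MM-DD') FROM account_analytic_line l"
```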
9069c2678b68571406458f7414c7b0474183090b | Fix check for dictionary entry | lit/Suite/lldbtest.py | lit/Suite/lldbtest.py | from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
def getBuildDir(cmd):
found = False
for arg in cmd:
if found:
return arg
if arg == '--build-dir':
found = True
return None
class LLDBTest(TestFormat):
def __init__(self, dotest_cmd):
self.dotest_cmd = dotest_cmd
def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
# Ignore dot files and excluded tests.
if (filename.startswith('.') or filename in localConfig.excludes):
continue
# Ignore files that don't start with 'Test'.
if not filename.startswith('Test'):
continue
filepath = os.path.join(source_path, filename)
if not os.path.isdir(filepath):
base, ext = os.path.splitext(filename)
if ext in localConfig.suffixes:
yield lit.Test.Test(testSuite, path_in_suite +
(filename, ), localConfig)
def execute(self, test, litConfig):
if litConfig.noExecute:
return lit.Test.PASS, ''
if test.config.lldb_disable_python:
return (lit.Test.UNSUPPORTED, 'Python module disabled')
if test.config.unsupported:
return (lit.Test.UNSUPPORTED, 'Test is unsupported')
testPath, testFile = os.path.split(test.getSourcePath())
# On Windows, the system does not always correctly interpret
# shebang lines. To make sure we can execute the tests, add
# python exe as the first parameter of the command.
cmd = [sys.executable] + self.dotest_cmd + [testPath, '-p', testFile]
# The macOS system integrity protection (SIP) doesn't allow injecting
# libraries into system binaries, but this can be worked around by
# copying the binary into a different location.
if 'DYLD_INSERT_LIBRARIES' in test.config.environment and \
sys.executable.startswith('/System/'):
builddir = getBuildDir(cmd)
assert(builddir)
copied_python = os.path.join(builddir, 'copied-system-python')
import shutil
shutil.copy(sys.executable, os.path.join(builddir, copied_python))
cmd[0] = copied_python
try:
out, err, exitCode = lit.util.executeCommand(
cmd,
env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT, 'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime))
if exitCode:
return lit.Test.FAIL, out + err
passing_test_line = 'RESULT: PASSED'
if passing_test_line not in out and passing_test_line not in err:
msg = ('Unable to find %r in dotest output:\n\n%s%s' %
(passing_test_line, out, err))
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS, ''
| from __future__ import absolute_import
import os
import subprocess
import sys
import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat
def getBuildDir(cmd):
found = False
for arg in cmd:
if found:
return arg
if arg == '--build-dir':
found = True
return None
class LLDBTest(TestFormat):
def __init__(self, dotest_cmd):
self.dotest_cmd = dotest_cmd
def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
localConfig):
source_path = testSuite.getSourcePath(path_in_suite)
for filename in os.listdir(source_path):
# Ignore dot files and excluded tests.
if (filename.startswith('.') or filename in localConfig.excludes):
continue
# Ignore files that don't start with 'Test'.
if not filename.startswith('Test'):
continue
filepath = os.path.join(source_path, filename)
if not os.path.isdir(filepath):
base, ext = os.path.splitext(filename)
if ext in localConfig.suffixes:
yield lit.Test.Test(testSuite, path_in_suite +
(filename, ), localConfig)
def execute(self, test, litConfig):
if litConfig.noExecute:
return lit.Test.PASS, ''
if test.config.lldb_disable_python:
return (lit.Test.UNSUPPORTED, 'Python module disabled')
if test.config.unsupported:
return (lit.Test.UNSUPPORTED, 'Test is unsupported')
testPath, testFile = os.path.split(test.getSourcePath())
# On Windows, the system does not always correctly interpret
# shebang lines. To make sure we can execute the tests, add
# python exe as the first parameter of the command.
cmd = [sys.executable] + self.dotest_cmd + [testPath, '-p', testFile]
# The macOS system integrity protection (SIP) doesn't allow injecting
# libraries into system binaries, but this can be worked around by
# copying the binary into a different location.
if test.config.environment['DYLD_INSERT_LIBRARIES'] and \
sys.executable.startswith('/System/'):
builddir = getBuildDir(cmd)
assert(builddir)
copied_python = os.path.join(builddir, 'copied-system-python')
import shutil
shutil.copy(sys.executable, os.path.join(builddir, copied_python))
cmd[0] = copied_python
try:
out, err, exitCode = lit.util.executeCommand(
cmd,
env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT, 'Reached timeout of {} seconds'.format(
litConfig.maxIndividualTestTime))
if exitCode:
return lit.Test.FAIL, out + err
passing_test_line = 'RESULT: PASSED'
if passing_test_line not in out and passing_test_line not in err:
msg = ('Unable to find %r in dotest output:\n\n%s%s' %
(passing_test_line, out, err))
return lit.Test.UNRESOLVED, msg
return lit.Test.PASS, ''
| Python | 0 |
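The one-line fix above swaps direct dictionary indexing for a membership test. A tiny illustration of why:

```python
env = {}

# Old check: indexing a missing key raises KeyError.
try:
    env['DYLD_INSERT_LIBRARIES']
except KeyError:
    print('KeyError when the variable is not set')

# New check: the membership test is simply False when the key is absent.
if 'DYLD_INSERT_LIBRARIES' in env:
    print('apply the SIP copied-python workaround')
```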
f27dc9d2793bb555d80a5c8e6635ba246278d017 | Add DES support | simplecrypto.py | simplecrypto.py | import hashlib
import math
from base64 import b64encode, b64decode
from Crypto.Cipher import DES, AES
from Crypto import Random
random_instance = Random.new()
algorithms = {'aes': AES, 'des': DES}
def sha1(message):
return hashlib.sha1(message).hexdigest()
def md5(message):
return hashlib.md5(message).hexdigest()
def sha256(message):
return hashlib.sha256(message).hexdigest()
def sha512(message):
return hashlib.sha512(message).hexdigest()
def str_to_base64(message):
return b64encode(message)
def base64_to_str(message):
return b64decode(message)
base64 = str_to_base64
def pad(message, length, padding=' '):
return message + (length - len(message)) * padding
def pad_multiple(message, len_multiple, padding=' '):
next_length = math.ceil(len(message) / float(len_multiple)) * len_multiple
return pad(message, int(next_length), padding)
def random(n_bytes):
return random_instance.read(n_bytes)
def encrypt(message, password, algorithm='aes'):
cls = algorithms[algorithm]
iv = random(cls.block_size)
instance = cls.new(pad_multiple(password, 16),
cls.MODE_CFB,
iv)
return str_to_base64(iv + instance.encrypt(message))
def decrypt(message, password, algorithm='aes'):
cls = algorithms[algorithm]  # use the requested cipher instead of hard-coded AES
message = base64_to_str(message)
iv, message = message[:cls.block_size], message[cls.block_size:]
instance = cls.new(pad_multiple(password, 16),
cls.MODE_CFB,
iv)
return instance.decrypt(message)
def encrypt_aes(message, password):
return encrypt(message, password, 'aes')
def decrypt_aes(message, password):
return decrypt(message, password, 'aes')
def encrypt_des(message, password):
return encrypt(message, password, 'des')
def decrypt_des(message, password):
return decrypt(message, password, 'des')
| import hashlib
import math
import base64
from Crypto.Cipher import DES, AES
from Crypto import Random
random_instance = Random.new()
algorithms = {'aes': AES, 'des': DES}
def sha1(message):
return hashlib.sha1(message).hexdigest()
def md5(message):
return hashlib.md5(message).hexdigest()
def sha256(message):
return hashlib.sha256(message).hexdigest()
def sha512(message):
return hashlib.sha512(message).hexdigest()
def str_to_base64(message):
return base64.b64encode(message)
def base64_to_str(message):
return base64.b64decode(message)
def pad(message, length, padding=' '):
return message + (length - len(message)) * padding
def pad_multiple(message, len_multiple, padding=' '):
next_length = math.ceil(len(message) / float(len_multiple)) * len_multiple
return pad(message, int(next_length), padding)
def random(n_bytes):
return random_instance.read(n_bytes)
def encrypt(message, password, algorithm='aes'):
cls = algorithms[algorithm]
iv = random(cls.block_size)
instance = cls.new(pad_multiple(password, 16),
cls.MODE_CFB,
iv)
return str_to_base64(iv + instance.encrypt(message))
def decrypt(message, password, algorithm='aes'):
cls = algorithms[algorithm]  # use the requested cipher instead of hard-coded AES
message = base64_to_str(message)
iv, message = message[:cls.block_size], message[cls.block_size:]
instance = cls.new(pad_multiple(password, 16),
cls.MODE_CFB,
iv)
return instance.decrypt(message)
def encrypt_aes(message, password):
return encrypt(message, password, 'aes')
def decrypt_aes(message, password):
return decrypt(message, password, 'aes')
| Python | 0 |
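A sketch of a round trip through the Python 2-era module above (requires PyCrypto). One caveat the code does not yet handle: PyCrypto's DES expects an 8-byte key, so the fixed 16-byte key padding used in `encrypt` works for `'aes'` but not for `'des'` as written.

```python
from simplecrypto import encrypt, decrypt

token = encrypt('secret message', 'hunter2', 'aes')   # base64(iv + ciphertext)
assert decrypt(token, 'hunter2', 'aes') == 'secret message'
```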
1bd814df2c5175ac7745b2d58fbe6b82c5a941ae | add 'debug' hack | sts/util/console.py | sts/util/console.py | BEGIN = '\033[1;'
END = '\033[1;m'
class color(object):
GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num : BEGIN + str(num) + "m", range(30, 39))
B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: BEGIN + str(num) + "m", range(40, 49))
NORMAL = END
class msg():
global_io_master = None
BEGIN = '\033[1;'
END = '\033[1;m'
GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num: str(num) + "m", range(30, 39))
B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: str(num) + "m", range(40, 49))
@staticmethod
def interactive(message):
# todo: would be nice to simply give logger a color arg, but that doesn't exist...
print msg.BEGIN + msg.WHITE + message + msg.END
@staticmethod
def event(message):
print msg.BEGIN + msg.CYAN + message + msg.END
@staticmethod
def raw_input(message):
prompt = msg.BEGIN + msg.WHITE + message + msg.END
if msg.global_io_master:
s = msg.global_io_master.raw_input(prompt)
else:
s = raw_input(prompt)
if s == "debug":
import pdb
pdb.set_trace()
return s
@staticmethod
def success(message):
print msg.BEGIN + msg.B_GREEN + msg.BEGIN + msg.WHITE + message + msg.END
@staticmethod
def fail(message):
print msg.BEGIN + msg.B_RED + msg.BEGIN + msg.WHITE + message + msg.END
@staticmethod
def set_io_master(io_master):
msg.global_io_master = io_master
@staticmethod
def unset_io_master():
msg.global_io_master = None
| BEGIN = '\033[1;'
END = '\033[1;m'
class color(object):
GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num : BEGIN + str(num) + "m", range(30, 39))
B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: BEGIN + str(num) + "m", range(40, 49))
NORMAL = END
class msg():
global_io_master = None
BEGIN = '\033[1;'
END = '\033[1;m'
GRAY, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, CRIMSON = map(lambda num: str(num) + "m", range(30, 39))
B_GRAY, B_RED, B_GREEN, B_YELLOW, B_BLUE, B_MAGENTA, B_CYAN, B_WHITE, B_CRIMSON = map(lambda num: str(num) + "m", range(40, 49))
@staticmethod
def interactive(message):
# todo: would be nice to simply give logger a color arg, but that doesn't exist...
print msg.BEGIN + msg.WHITE + message + msg.END
@staticmethod
def event(message):
print msg.BEGIN + msg.CYAN + message + msg.END
@staticmethod
def raw_input(message):
prompt = msg.BEGIN + msg.WHITE + message + msg.END
if msg.global_io_master:
return msg.global_io_master.raw_input(prompt)
else:
return raw_input(prompt)
@staticmethod
def success(message):
print msg.BEGIN + msg.B_GREEN + msg.BEGIN + msg.WHITE + message + msg.END
@staticmethod
def fail(message):
print msg.BEGIN + msg.B_RED + msg.BEGIN + msg.WHITE + message + msg.END
@staticmethod
def set_io_master(io_master):
msg.global_io_master = io_master
@staticmethod
def unset_io_master():
msg.global_io_master = None
| Python | 0.000001 |
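With the hack above, any prompt issued through `msg.raw_input` doubles as a debugger hook: typing the literal string `debug` drops into pdb at that point before the input is returned. A usage sketch:

```python
from sts.util.console import msg

# Entering "debug" at this prompt opens a pdb session; any other input is
# returned to the caller unchanged.
answer = msg.raw_input('(c)ontinue or (q)uit? ')
```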
4987412578744db64984cb40841994b3852287f7 | update evalrunner | pytools/src/IndexEval/evalrunner.py | pytools/src/IndexEval/evalrunner.py | '''
Created on 04.11.2015
@author: selen00r
'''
import datetime
from pymongo.mongo_client import MongoClient
import evalresult
class EvalRunner(object):
'''
Base class to run an evaluation of an index.
'''
def __init__(self):
'''
Constructor
'''
self.dbName = "indexdb"
self.idxATX = "atx"
self.idxCAC = "cac"
self.idxDax = "dax"
self.idxDowJones = "dowjones"
self.idxEStoxx50 = "estoxx50"
self.idxFTS100 = "ftse100"
self.idxFtseMib = "ftsemib"
self.idxHangSeng = "hangseng"
self.idxIbex = "ibex"
self.idxMDax = "mdax"
self.idxNasdaq100 = "nasdaq100"
self.idxNikkei = "nikkei"
self.idxSMI = "smi"
self.idxSP500 = "sp500"
self.idxTecDax = "tecdax"
self.allIndices = [self.idxATX, self.idxCAC, self.idxDax, self.idxDowJones, self.idxEStoxx50,
self.idxFTS100, self.idxFtseMib, self.idxHangSeng, self.idxIbex, self.idxMDax,
self.idxNasdaq100, self.idxNikkei, self.idxSMI, self.idxTecDax]
def setUp(self):
self.mongoClient = MongoClient()
self.database = self.mongoClient[self.dbName]
self.startDate = datetime.datetime( 2000, 1, 1 )
self.endDate = datetime.datetime( 2015, 12, 1 )
self.startInvest = 1000.0
self.fixedInvest = True
self.excludeChecker = evalresult.ExcludeTransaction()
self.resultCalculator = evalresult.ResultCalculator()
self.resultCalculatorEuro = evalresult.ResultCalculatorEuro(self.startInvest, self.fixedInvest)
def tearDown(self):
pass
| '''
Created on 04.11.2015
@author: selen00r
'''
import datetime
from pymongo.mongo_client import MongoClient
import evalresult
class EvalRunner(object):
'''
Base class to run an evaluation of an index.
'''
def __init__(self):
'''
Constructor
'''
self.dbName = "indexdb"
self.idxDax = "dax"
self.idxMDax = "mdax"
self.idxTecDax = "tecdax"
self.idxSP500 = "sp500"
self.idxNasdaq100 = "nasdaq100"
self.idxEStoxx50 = "estoxx50"
self.idxNikkei = "nikkei"
self.idxSMI = "smi"
self.idxATX = "atx"
self.idxCAC = "cac"
self.idxDowJones = "dowjones"
self.idxFTS100 = "fts100"
self.idxFtseMib = "ftsemib"
self.idxHangSeng = "hangseng"
self.idxIbex = "ibex"
self.allIndices = [self.idxATX, self.idxCAC, self.idxDax, self.idxDowJones, self.idxEStoxx50,
self.idxFTS100, self.idxFtseMib, self.idxHangSeng, self.idxIbex, self.idxMDax,
self.idxNasdaq100, self.idxNikkei, self.idxSMI, self.idxTecDax]
def setUp(self):
self.mongoClient = MongoClient()
self.database = self.mongoClient[self.dbName]
self.startDate = datetime.datetime( 2000, 1, 1 )
self.endDate = datetime.datetime( 2015, 10, 1 )
self.startInvest = 1000.0
self.fixedInvest = True
self.excludeChecker = evalresult.ExcludeTransaction()
self.resultCalculator = evalresult.ResultCalculator()
self.resultCalculatorEuro = evalresult.ResultCalculatorEuro(self.startInvest, self.fixedInvest)
def tearDown(self):
pass | Python | 0 |
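A hypothetical concrete runner built on the base class above, to show the intended call sequence; the collection access assumes each index name maps to a MongoDB collection in `indexdb`, as `setUp` suggests.

```python
from evalrunner import EvalRunner

class DaxEvalRunner(EvalRunner):
    """Sketch: evaluate a single index between startDate and endDate."""
    def run(self):
        self.setUp()                       # opens MongoClient, sets date range
        dax = self.database[self.idxDax]   # per-index collection handle
        print(dax.name, self.startDate, self.endDate)
        self.tearDown()
```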
5ba73b9dd92b55b3f02f76ae981e53744abac750 | Add an option to time SQL statements | sir/__main__.py | sir/__main__.py | # Copyright (c) 2014 Wieland Hoffmann
# License: MIT, see LICENSE for details
import argparse
import logging
import multiprocessing
from . import config
from .indexing import reindex
from .schema import SCHEMA
from sqlalchemy import exc as sa_exc
logger = logging.getLogger("sir")
def watch(args):
raise NotImplementedError
def main():
loghandler = logging.StreamHandler()
formatter = logging.Formatter(fmt="%(processName)s %(asctime)s %(levelname)s: %(message)s")
loghandler.setFormatter(formatter)
logger.addHandler(loghandler)
mplogger = multiprocessing.get_logger()
mplogger.setLevel(logging.ERROR)
mplogger.addHandler(loghandler)
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
parser.add_argument("--sqltimings", action="store_true")
subparsers = parser.add_subparsers()
reindex_parser = subparsers.add_parser("reindex", help="Reindexes all or a single entity type")
reindex_parser.set_defaults(func=reindex)
reindex_parser.add_argument('--entities', action='append', help="""Which
entity types to index.
Available are: %s""" % (", ".join(SCHEMA.keys())))
watch_parser = subparsers.add_parser("watch", help="Watches for incoming messages on an AMQP queue")
watch_parser.set_defaults(func=watch)
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
if args.sqltimings:
from sqlalchemy import event
from sqlalchemy.engine import Engine
import time
sqltimelogger = logging.getLogger("sqltimer")
sqltimelogger.setLevel(logging.DEBUG)
sqltimelogger.addHandler(loghandler)
@event.listens_for(Engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
conn.info.setdefault('query_start_time', []).append(time.time())
sqltimelogger.debug("Start Query: %s" % statement)
@event.listens_for(Engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement,
parameters, context, executemany):
total = time.time() - conn.info['query_start_time'].pop(-1)
sqltimelogger.debug("Query Complete!")
sqltimelogger.debug("Total Time: %f" % total)
config.read_config()
func = args.func
args = vars(args)
func(args["entities"])
if __name__ == '__main__':
main() | # Copyright (c) 2014 Wieland Hoffmann
# License: MIT, see LICENSE for details
import argparse
import logging
import multiprocessing
from . import config
from .indexing import reindex
from .schema import SCHEMA
from sqlalchemy import exc as sa_exc
logger = logging.getLogger("sir")
def watch(args):
raise NotImplementedError
def main():
loghandler = logging.StreamHandler()
formatter = logging.Formatter(fmt="%(processName)s %(asctime)s %(levelname)s: %(message)s")
loghandler.setFormatter(formatter)
logger.addHandler(loghandler)
mplogger = multiprocessing.get_logger()
mplogger.setLevel(logging.ERROR)
mplogger.addHandler(loghandler)
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action="store_true")
subparsers = parser.add_subparsers()
reindex_parser = subparsers.add_parser("reindex", help="Reindexes all or a single entity type")
reindex_parser.set_defaults(func=reindex)
reindex_parser.add_argument('--entities', action='append', help="""Which
entity types to index.
Available are: %s""" % (", ".join(SCHEMA.keys())))
watch_parser = subparsers.add_parser("watch", help="Watches for incoming messages on an AMQP queue")
watch_parser.set_defaults(func=watch)
args = parser.parse_args()
if args.debug:
logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
config.read_config()
func = args.func
args = vars(args)
func(args["entities"])
if __name__ == '__main__':
main() | Python | 0.000014 |
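The `--sqltimings` option added above relies on SQLAlchemy's cursor-execute events. The same pattern in a standalone sketch against an in-memory SQLite engine:

```python
import time
from sqlalchemy import create_engine, event, text
from sqlalchemy.engine import Engine

@event.listens_for(Engine, "before_cursor_execute")
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    # Stack a start time per statement so nested executes still pair up.
    conn.info.setdefault('query_start_time', []).append(time.time())

@event.listens_for(Engine, "after_cursor_execute")
def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    total = time.time() - conn.info['query_start_time'].pop(-1)
    print('%.6fs %s' % (total, statement))

engine = create_engine('sqlite://')
with engine.connect() as connection:
    connection.execute(text('SELECT 1'))
```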
dde133a9ae751ce3caab8e8896c1e04e48c0cc1e | fix typo | qiita_pet/handlers/base_handlers.py | qiita_pet/handlers/base_handlers.py | from tornado.web import RequestHandler
class BaseHandler(RequestHandler):
def get_current_user(self):
'''Overrides default method of returning user currently connected'''
user = self.get_secure_cookie("user")
if user is None:
self.clear_cookie("user")
return None
else:
return user.strip('" ')
def write_error(self, status_code, **kwargs):
'''Overrides the error page created by Tornado'''
from traceback import format_exception
if self.settings.get("debug") and "exc_info" in kwargs:
exc_info = kwargs["exc_info"]
trace_info = ''.join(["%s<br />" % line for line in
format_exception(*exc_info)])
request_info = ''.join(["<strong>%s</strong>: %s<br />" %
(k, self.request.__dict__[k]) for k in
self.request.__dict__.keys()])
error = exc_info[1]
self.render('error.html', error=error, trace_info=trace_info,
request_info=request_info,
user=self.current_user)
def head(self):
"""Adds proper response for head requests"""
self.finish()
class MainHandler(BaseHandler):
'''Index page'''
def get(self):
username = self.current_user
completedanalyses = []
self.render("index.html", user=username, analyses=completedanalyses)
class MockupHandler(BaseHandler):
def get(self):
self.render("mockup.html", user=self.current_user)
class NoPageHandler(BaseHandler):
def get(self):
self.render("404.html", user=self.current_user)
| from tornado.web import RequestHandler
class BaseHandler(RequestHandler):
def get_current_user(self):
'''Overrides default method of returning user currently connected'''
user = self.get_secure_cookie("user")
if user is None:
self.clear_cookie("user")
return None
else:
return user.strip('" ')
def write_error(self, status_code, **kwargs):
'''Overrides the error page created by Tornado'''
from traceback import format_exception
if self.settings.get("debug") and "exc_info" in kwargs:
exc_info = kwargs["exc_info"]
trace_info = ''.join(["%s<br />" % line for line in
format_exception(*exc_info)])
request_info = ''.join(["<strong>%s</strong>: %s<br />" %
(k, self.request.__dict__[k]) for k in
self.request.__dict__.keys()])
error = exc_info[1]
self.render('error.html', error=error, trace_info=trace_info,
request_info=request_info,
user=self.current_user)
def head(self):
"""Adds proper resonse for head requests"""
self.finish()
class MainHandler(BaseHandler):
'''Index page'''
def get(self):
username = self.current_user
completedanalyses = []
self.render("index.html", user=username, analyses=completedanalyses)
class MockupHandler(BaseHandler):
def get(self):
self.render("mockup.html", user=self.current_user)
class NoPageHandler(BaseHandler):
def get(self):
self.render("404.html", user=self.current_user)
| Python | 0.999991 |
6fbb50fcb851d0387d44dbaca361cc63f1dbbe79 | Add get_db_query method. | loldb/v2/resources.py | loldb/v2/resources.py | import collections
import os
import re
import sqlite3
import raf
def _get_highest_version(versions):
versions = [(v, v.split('.')) for v in versions]
def version_converter(version):
try:
parts = map(int, version[1])
except ValueError:
return None
else:
return [parts, version[0]]
versions = map(version_converter, versions)
versions = filter(lambda x: x is not None, versions)
versions = sorted(versions)
if not versions:
raise RuntimeError("No valid version.")
return versions[-1][1]
def _make_re_pattern(token_str, flags=None):
"""Converts spacing in token_str to variable-length, compiles."""
return re.compile(r'\s*'.join(token_str.split()), flags)
def _build_path(
base_path,
project="lol_air_client",
subdir='releases',
version=None):
"""Generate path for most recent release of a project."""
subdir = subdir.lower()
path = [base_path, "RADS/projects", project, subdir]
if subdir != 'filearchives':
if version is None:
current_base = os.path.join(*path)
versions = os.listdir(current_base)
versions = [v for v in versions if
os.path.isdir(os.path.join(current_base, v))]
version = _get_highest_version(versions)
path.append(version)
return os.path.join(*path)
class ResourceProvider(object):
def __init__(self, lol_path, language='en_US'):
self.base_path = lol_path
self.language = language
self.db = None
self.raf = None
self.font_config = None
def _get_db_path(self):
return os.path.join(
_build_path(self.base_path),
# TODO: Is /bin used on Windows?
'deploy/bin/assets/data/gameStats',
'gameStats_%s.sqlite' % self.language,
)
def _get_raf_path(self):
return _build_path(
self.base_path,
'lol_game_client',
'filearchives'
)
def get_db(self):
"""Get connection to gameStats database."""
if self.db is None:
self.db = sqlite3.connect(self._get_db_path())
return self.db
def get_db_query(self, query):
"""Get the rows from a gameStats database query."""
connection = self.get_db()
cursor = connection.cursor()
# execute doesn't accept a parametrized table name
rows = cursor.execute(query)
# Get column names from cursor
columns = [c[0] for c in cursor.description]
row_class = collections.namedtuple('Row', columns)
for row in rows:
row = row_class(*row)
yield row
def get_db_rows(self, table):
"""Get the rows from a gameStats database table."""
return self.get_db_query("SELECT * FROM `%s`" % table)
def get_raf_master(self):
"""Get RAFMaster instance for game client."""
if self.raf is None:
self.raf = raf.RAFMaster(self._get_raf_path())
return self.raf
def get_font_config(self):
"""Get font_config dictionary."""
if self.font_config is None:
archive = self.get_raf_master()
font_config = {}
font_config_text = archive.find(name='fontconfig_en_US.txt').read()
font_config_re = _make_re_pattern('^ tr "([^"]+)" = "(.+)" $', re.M)
for match in font_config_re.finditer(font_config_text):
font_config[match.group(1)] = match.group(2)
self.font_config = font_config
return self.font_config
class MacResourceProvider(ResourceProvider):
def __init__(self, lol_path=None, **kwargs):
if lol_path is None:
lol_path = "/Applications/League of Legends.app/Contents/LOL"
super(MacResourceProvider, self).__init__(lol_path, **kwargs)
class WindowsResourceProvider(ResourceProvider):
pass
| import collections
import os
import re
import sqlite3
import raf
def _get_highest_version(versions):
versions = [(v, v.split('.')) for v in versions]
def version_converter(version):
try:
parts = map(int, version[1])
except ValueError:
return None
else:
return [parts, version[0]]
versions = map(version_converter, versions)
versions = filter(lambda x: x is not None, versions)
versions = sorted(versions)
if not versions:
raise RuntimeError("No valid version.")
return versions[-1][1]
def _make_re_pattern(token_str, flags=None):
"""Converts spacing in token_str to variable-length, compiles."""
return re.compile(r'\s*'.join(token_str.split()), flags)
def _build_path(
base_path,
project="lol_air_client",
subdir='releases',
version=None):
"""Generate path for most recent release of a project."""
subdir = subdir.lower()
path = [base_path, "RADS/projects", project, subdir]
if subdir != 'filearchives':
if version is None:
current_base = os.path.join(*path)
versions = os.listdir(current_base)
versions = [v for v in versions if
os.path.isdir(os.path.join(current_base, v))]
version = _get_highest_version(versions)
path.append(version)
return os.path.join(*path)
class ResourceProvider(object):
def __init__(self, lol_path, language='en_US'):
self.base_path = lol_path
self.language = language
self.db = None
self.raf = None
self.font_config = None
def _get_db_path(self):
return os.path.join(
_build_path(self.base_path),
# TODO: Is /bin used on Windows?
'deploy/bin/assets/data/gameStats',
'gameStats_%s.sqlite' % self.language,
)
def _get_raf_path(self):
return _build_path(
self.base_path,
'lol_game_client',
'filearchives'
)
def get_db(self):
"""Get connection to gameStats database."""
if self.db is None:
self.db = sqlite3.connect(self._get_db_path())
return self.db
def get_db_rows(self, table):
"""Get the rows from a gameStats database table."""
connection = self.get_db()
cursor = connection.cursor()
# execute doesn't accept a parametrized table name
rows = cursor.execute("SELECT * FROM `%s`" % table)
# Get column names from cursor
columns = [c[0] for c in cursor.description]
row_class = collections.namedtuple('Row', columns)
for row in rows:
row = row_class(*row)
yield row
def get_raf_master(self):
"""Get RAFMaster instance for game client."""
if self.raf is None:
self.raf = raf.RAFMaster(self._get_raf_path())
return self.raf
def get_font_config(self):
"""Get font_config dictionary."""
if self.font_config is None:
archive = self.get_raf_master()
font_config = {}
font_config_text = archive.find(name='fontconfig_en_US.txt').read()
font_config_re = _make_re_pattern('^ tr "([^"]+)" = "(.+)" $', re.M)
for match in font_config_re.finditer(font_config_text):
font_config[match.group(1)] = match.group(2)
self.font_config = font_config
return self.font_config
class MacResourceProvider(ResourceProvider):
def __init__(self, lol_path=None, **kwargs):
if lol_path is None:
lol_path = "/Applications/League of Legends.app/Contents/LOL"
super(MacResourceProvider, self).__init__(lol_path, **kwargs)
class WindowsResourceProvider(ResourceProvider):
pass
| Python | 0 |
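Usage sketch for the new helper; rows come back as namedtuples built from the cursor description, so columns are attributes. The provider needs a local League of Legends install, and the table and column names below are hypothetical:

```python
from loldb.v2.resources import MacResourceProvider

provider = MacResourceProvider()
for row in provider.get_db_query("SELECT * FROM `champions` LIMIT 5"):
    print(row.name)  # any SELECTed column is available as an attribute
```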
45ab8a3585008cc4ad31eb553b9291f4e1a65c01 | Update C++ version, #190 | binding.gyp | binding.gyp | {
"variables": {
"os_linux_compiler%": "gcc",
"use_vl32%": "false",
"use_fixed_size%": "false",
"use_posix_semaphores%": "false"
},
"targets": [
{
"target_name": "node-lmdb",
"win_delay_load_hook": "false",
"sources": [
"dependencies/lmdb/libraries/liblmdb/mdb.c",
"dependencies/lmdb/libraries/liblmdb/midl.c",
"src/node-lmdb.cpp",
"src/env.cpp",
"src/misc.cpp",
"src/txn.cpp",
"src/dbi.cpp",
"src/cursor.cpp"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"dependencies/lmdb/libraries/liblmdb"
],
"conditions": [
["OS=='linux'", {
"variables": {
"gcc_version" : "<!(<(os_linux_compiler) -dumpversion | cut -d '.' -f 1)",
},
"conditions": [
["gcc_version>=7", {
"cflags": [
"-Wimplicit-fallthrough=2",
],
}],
],
"ldflags": [
"-fPIC",
"-fvisibility=hidden"
],
"cflags": [
"-fPIC",
"-fvisibility=hidden"
],
"cflags_cc": [
"-fPIC",
"-fvisibility=hidden",
"-fvisibility-inlines-hidden",
"-std=c++14"
]
}],
["OS=='mac'", {
"conditions": [
["use_posix_semaphores=='true'", {
"defines": ["MDB_USE_POSIX_SEM"]
}]
],
"xcode_settings": {
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++14"],
"MACOSX_DEPLOYMENT_TARGET": "10.7",
"OTHER_LDFLAGS": ["-std=c++14"],
"CLANG_CXX_LIBRARY": "libc++"
}
}],
["OS=='win'", {
"libraries": ["ntdll.lib"]
}],
["use_fixed_size=='true'", {
"defines": ["MDB_FIXEDSIZE"],
}],
["use_vl32=='true'", {
"conditions": [
["target_arch=='ia32'", {
"defines": ["MDB_VL32"]
}]
]
}],
],
}
]
}
| {
"variables": {
"os_linux_compiler%": "gcc",
"use_vl32%": "false",
"use_fixed_size%": "false",
"use_posix_semaphores%": "false"
},
"targets": [
{
"target_name": "node-lmdb",
"win_delay_load_hook": "false",
"sources": [
"dependencies/lmdb/libraries/liblmdb/mdb.c",
"dependencies/lmdb/libraries/liblmdb/midl.c",
"src/node-lmdb.cpp",
"src/env.cpp",
"src/misc.cpp",
"src/txn.cpp",
"src/dbi.cpp",
"src/cursor.cpp"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"dependencies/lmdb/libraries/liblmdb"
],
"conditions": [
["OS=='linux'", {
"variables": {
"gcc_version" : "<!(<(os_linux_compiler) -dumpversion | cut -d '.' -f 1)",
},
"conditions": [
["gcc_version>=7", {
"cflags": [
"-Wimplicit-fallthrough=2",
],
}],
],
"ldflags": [
"-fPIC",
"-fvisibility=hidden"
],
"cflags": [
"-fPIC",
"-fvisibility=hidden"
],
"cflags_cc": [
"-fPIC",
"-fvisibility=hidden",
"-fvisibility-inlines-hidden",
"-std=c++0x"
]
}],
["OS=='mac'", {
"conditions": [
["use_posix_semaphores=='true'", {
"defines": ["MDB_USE_POSIX_SEM"]
}]
],
"xcode_settings": {
"OTHER_CPLUSPLUSFLAGS" : ["-std=c++11"],
"MACOSX_DEPLOYMENT_TARGET": "10.7",
"OTHER_LDFLAGS": ["-std=c++11"],
"CLANG_CXX_LIBRARY": "libc++"
}
}],
["OS=='win'", {
"libraries": ["ntdll.lib"]
}],
["use_fixed_size=='true'", {
"defines": ["MDB_FIXEDSIZE"],
}],
["use_vl32=='true'", {
"conditions": [
["target_arch=='ia32'", {
"defines": ["MDB_VL32"]
}]
]
}],
],
}
]
}
| Python | 0 |
9c950c73bdc518c35aa471834431222c7c60ea4b | update binding.gyp | binding.gyp | binding.gyp | {
"targets": [
{
"target_name": "ons",
"sources": [
"src/entry.cpp",
"src/ons_options.cpp",
"src/consumer_ack.cpp",
"src/consumer.cpp",
"src/producer.cpp",
"src/consumer_listener.cpp",
"src/third_party/sole/sole.cpp"
],
"include_dirs": [
"src/third_party/include",
"src/third_party/sole",
"<!(node -e \"require('nan')\")"
],
"conditions": [
["OS==\"mac\"", {
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ],
"cflags_cc": [ "-Wno-ignored-qualifiers" ],
"cflags": [ "-std=c++11", "-stdlib=libc++" ],
"include_dirs": [
"/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1"
],
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES"
}
}],
["OS==\"linux\"", {
"libraries": [
"../src/third_party/lib/linux/libonsclient4cpp.a"
],
"cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ],
"cflags_cc": [ "-Wno-ignored-qualifiers" ],
"cflags": [ "-std=c++11" ]
}],
["OS==\"win\"", {
"conditions": [
["target_arch==\"ia32\"", {
"libraries": [
"../src/third_party/lib/windows/32x/ONSClient4CPP.lib"
],
"copies": [
{
"destination": "<(module_root_dir)/build/Release/",
"files": [ "<(module_root_dir)/src/third_party/lib/windows/32x/ONSClient4CPP.dll" ]
}
]
}],
["target_arch==\"x64\"", {
"libraries": [
"../src/third_party/lib/windows/64x/ONSClient4CPP.lib"
],
"copies": [
{
"destination": "<(module_root_dir)/build/Release/",
"files": [ "<(module_root_dir)/src/third_party/lib/windows/64x/ONSClient4CPP.dll" ]
}
]
}]
]
}]
]
}
]
}
| {
"targets": [
{
"target_name": "ons",
"sources": [
"src/entry.cpp",
"src/ons_options.cpp",
"src/consumer_ack.cpp",
"src/consumer.cpp",
"src/producer.cpp",
"src/consumer_listener.cpp"
],
"include_dirs": [
"src/third_party/include",
"<!(node -e \"require('nan')\")"
],
"conditions": [
["OS==\"mac\"", {
"cflags!": [ "-fno-exceptions" ],
"cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ],
"cflags_cc": [ "-Wno-ignored-qualifiers" ],
"cflags": [ "-std=c++11", "-stdlib=libc++" ],
"include_dirs": [
"/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1"
],
"xcode_settings": {
"GCC_ENABLE_CPP_EXCEPTIONS": "YES"
}
}],
["OS==\"linux\"", {
"libraries": [
"../src/third_party/lib/linux/libonsclient4cpp.a"
],
"cflags_cc!": [ "-fno-exceptions", "-pthread", "-Wl,--no-as-needed", "-ldl" ],
"cflags_cc": [ "-Wno-ignored-qualifiers" ],
"cflags": [ "-std=c++11" ]
}],
["OS==\"win\"", {
"conditions": [
["target_arch==\"ia32\"", {
"libraries": [
"../src/third_party/lib/windows/32x/ONSClient4CPP.lib"
],
"copies": [
{
"destination": "<(module_root_dir)/build/Release/",
"files": [ "<(module_root_dir)/src/third_party/lib/windows/32x/ONSClient4CPP.dll" ]
}
]
}],
["target_arch==\"x64\"", {
"libraries": [
"../src/third_party/lib/windows/64x/ONSClient4CPP.lib"
],
"copies": [
{
"destination": "<(module_root_dir)/build/Release/",
"files": [ "<(module_root_dir)/src/third_party/lib/windows/64x/ONSClient4CPP.dll" ]
}
]
}]
]
}]
]
}
]
}
| Python | 0 |
f0f3a7ab0b285f447f0573ff537e6252a8752528 | Use pkg-build in binding.gyp | binding.gyp | binding.gyp | {
"targets": [
{
"target_name": "tiff-multipage",
"sources": [
"src/module.cc",
"src/sync.cc",
"src/async.cc",
"src/tiff_multipage.cc"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"<!@(pkg-config libtiff-4 --cflags-only-I | sed s/-I//g)"
],
"libraries": [
"-ltiff",
"<!@(pkg-config --libs libtiff-4)"
]
}
]
}
| {
"targets": [
{
"target_name": "tiff-multipage",
"sources": [
"src/module.cc",
"src/sync.cc",
"src/async.cc",
"src/tiff_multipage.cc"
],
"include_dirs": ["<!(node -e \"require('nan')\")"],
"libraries": [
"-ltiff"
]
}
]
}
| Python | 0 |
c34f040ba19c27277d6cc9a1ad46e4c8d668e77b | Apply -DNDEBUG globally on release builds | binding.gyp | binding.gyp | {
"target_defaults": {
"target_conditions": [
["OS != 'win'", {
"cflags": ["-fdata-sections", "-ffunction-sections", "-fvisibility=hidden"],
"ldflags": ["-Wl,--gc-sections"]
}],
["OS == 'mac'", {
"xcode_settings": {
"MACOSX_DEPLOYMENT_TARGET": "10.9",
}
}]
],
"target_configurations": [
[{
"Release": {
"defines+": ["NDEBUG"]
}
}]
]
},
"targets": [
{
"target_name": "libargon2",
"sources": [
"argon2/src/argon2.c",
"argon2/src/core.c",
"argon2/src/blake2/blake2b.c",
"argon2/src/thread.c",
"argon2/src/encoding.c",
],
"include_dirs": ["argon2/include"],
"cflags": ["-march=native", "-Wno-type-limits"],
"conditions": [
["target_arch == 'ia32' or target_arch == 'x64'", {
"cflags+": ["-msse", "-msse2"],
"sources+": ["argon2/src/opt.c"]
}, {
"sources+": ["argon2/src/ref.c"]
}]
],
"type": "static_library"
}, {
"target_name": "argon2",
"sources": [
"src/argon2_node.cpp"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"argon2/include"
],
"dependencies": ["libargon2"],
"configurations": {
"Debug": {
"conditions": [
["OS == 'linux'", {
"cflags": ["--coverage", "-Wall", "-Wextra"],
"ldflags": ["-fprofile-arcs", "-ftest-coverage"],
}]
]
}
}
}
]
}
| {
"target_defaults": {
"target_conditions": [
["OS != 'win'", {
"cflags": ["-fdata-sections", "-ffunction-sections", "-fvisibility=hidden"],
"ldflags": ["-Wl,--gc-sections"]
}],
["OS == 'mac'", {
"xcode_settings": {
"MACOSX_DEPLOYMENT_TARGET": "10.9",
}
}]
]
},
"targets": [
{
"target_name": "libargon2",
"sources": [
"argon2/src/argon2.c",
"argon2/src/core.c",
"argon2/src/blake2/blake2b.c",
"argon2/src/thread.c",
"argon2/src/encoding.c",
],
"include_dirs": ["argon2/include"],
"cflags": ["-march=native", "-Wno-type-limits"],
"conditions": [
["target_arch == 'ia32' or target_arch == 'x64'", {
"cflags+": ["-msse", "-msse2"],
"sources+": ["argon2/src/opt.c"]
}, {
"sources+": ["argon2/src/ref.c"]
}]
],
"type": "static_library"
}, {
"target_name": "argon2",
"sources": [
"src/argon2_node.cpp"
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
"argon2/include"
],
"dependencies": ["libargon2"],
"configurations": {
"Debug": {
"conditions": [
["OS == 'linux'", {
"cflags": ["--coverage", "-Wall", "-Wextra"],
"ldflags": ["-fprofile-arcs", "-ftest-coverage"],
}]
]
},
"Release": {
"defines+": ["NDEBUG"]
}
}
}
]
}
| Python | 0 |
b0dea361dfb27e537c0165dac69e71c20f33e883 | Add helpers to bindings.gyp | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'jsaudio',
'sources': ['src/jsaudio.cc', 'src/helpers.cc'],
'include_dirs': [
'<!(node -e "require(\'nan\')")',
'<(module_root_dir)/vendor/'
],
"conditions": [
[
'OS=="win"', {
"conditions": [
[
'target_arch=="ia32"', {
"libraries": [
'<(module_root_dir)/vendor/portaudio_x86.lib'
],
'copies': [
{
'destination': '<(module_root_dir)/build/Release/',
'files': [
'<(module_root_dir)/vendor/portaudio_x86.dll',
'<(module_root_dir)/vendor/portaudio_x86.lib',
]
}]
}
],
[
'target_arch=="x64"', {
"libraries": [
'<(module_root_dir)/vendor/portaudio_x64.lib'
],
'copies': [
{
'destination': '<(module_root_dir)/build/Release/',
'files': [
'<(module_root_dir)/vendor/portaudio_x64.dll',
'<(module_root_dir)/vendor/portaudio_x64.lib',
]
}]
}
]
],
"include_dirs": ["gyp/include"]
}
]
]
}]
}
| {
'targets': [
{
'target_name': 'jsaudio',
'sources': ['src/jsaudio.cc'],
'include_dirs': [
'<!(node -e "require(\'nan\')")',
'<(module_root_dir)/vendor/'
],
"conditions": [
[
'OS=="win"', {
"conditions": [
[
'target_arch=="ia32"', {
"libraries": [
'<(module_root_dir)/vendor/portaudio_x86.lib'
],
'copies': [
{
'destination': '<(module_root_dir)/build/Release/',
'files': [
'<(module_root_dir)/vendor/portaudio_x86.dll',
'<(module_root_dir)/vendor/portaudio_x86.lib',
]
}]
}
],
[
'target_arch=="x64"', {
"libraries": [
'<(module_root_dir)/vendor/portaudio_x64.lib'
],
'copies': [
{
'destination': '<(module_root_dir)/build/Release/',
'files': [
'<(module_root_dir)/vendor/portaudio_x64.dll',
'<(module_root_dir)/vendor/portaudio_x64.lib',
]
}]
}
]
],
"include_dirs": ["gyp/include"]
}
]
]
}]
}
| Python | 0.000001 |
7e5cafed3908f829bb8ff334a7d8f6ebb939a7cc | fix test import for python3 | d4s2_api/dukeds_auth.py | d4s2_api/dukeds_auth.py | from gcb_web_auth.dukeds_auth import DukeDSTokenAuthentication
from gcb_web_auth.backends.dukeds import DukeDSAuthBackend
from gcb_web_auth.backends.base import BaseBackend
from .models import DukeDSUser
class D4S2DukeDSTokenAuthentication(DukeDSTokenAuthentication):
"""
Extends authorization to save users to DukeDSUser
"""
def __init__(self):
self.backend = DukeDSAuthBackend()
class D4S2DukeDSAuthBackend(DukeDSAuthBackend):
"""
Backend for DukeDS Auth that save users to DukeDSUser
Conveniently, the keys used by DukeDS user objects are a superset of the django ones,
so we rely on the filtering in the base class
"""
def __init__(self, save_tokens=True, save_dukeds_users=True):
super(D4S2DukeDSAuthBackend, self).__init__(save_tokens, save_dukeds_users)
self.save_tokens = save_tokens
self.save_dukeds_users = save_dukeds_users
self.failure_reason = None
def save_dukeds_user(self, user, raw_user_dict):
user_dict = DukeDSAuthBackend.harmonize_dukeds_user_details(raw_user_dict)
dukeds_user, created = DukeDSUser.objects.get_or_create(user=user,
dds_id=raw_user_dict.get('id'))
if created:
BaseBackend.update_model(dukeds_user, user_dict)
| from gcb_web_auth.dukeds_auth import DukeDSTokenAuthentication
from gcb_web_auth.backends.dukeds import DukeDSAuthBackend
from gcb_web_auth.backends.base import BaseBackend
from models import DukeDSUser
class D4S2DukeDSTokenAuthentication(DukeDSTokenAuthentication):
"""
Extends authorization to save users to DukeDSUser
"""
def __init__(self):
self.backend = DukeDSAuthBackend()
class D4S2DukeDSAuthBackend(DukeDSAuthBackend):
"""
Backend for DukeDS Auth that save users to DukeDSUser
Conveniently, the keys used by DukeDS user objects are a superset of the django ones,
so we rely on the filtering in the base class
"""
def __init__(self, save_tokens=True, save_dukeds_users=True):
super(D4S2DukeDSAuthBackend, self).__init__(save_tokens, save_dukeds_users)
self.save_tokens = save_tokens
self.save_dukeds_users = save_dukeds_users
self.failure_reason = None
def save_dukeds_user(self, user, raw_user_dict):
user_dict = DukeDSAuthBackend.harmonize_dukeds_user_details(raw_user_dict)
dukeds_user, created = DukeDSUser.objects.get_or_create(user=user,
dds_id=raw_user_dict.get('id'))
if created:
BaseBackend.update_model(dukeds_user, user_dict)
| Python | 0.000001 |
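The one-line diff above is the standard Python 3 intra-package import fix: implicit relative imports were removed (PEP 328), so a sibling module must be named explicitly. Illustrative only, since both forms need a package context to run:

```python
# Python 2 resolved bare intra-package imports implicitly:
#   from models import DukeDSUser      # ImportError under Python 3

# Python 3 requires the explicit relative (or fully absolute) form:
from .models import DukeDSUser
```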
7c788c868323aa8c6237caab208d726c5cce24ac | address first time new user condition where user_id may be none | cis/user.py | cis/user.py | """First class object to represent a user and data about that user."""
import logging
from cis.settings import get_config
logger = logging.getLogger(__name__)
class Profile(object):
def __init__(self, boto_session=None, profile_data=None):
"""
:param boto_session: The boto session object from the constructor.
:param profile_data: The decrypted user profile JSON.
"""
self.boto_session = boto_session
self.config = get_config()
self.profile_data = profile_data
self.dynamodb_table = None
@property
def exists(self):
if self.retrieve_from_vault() is not None:
return True
else:
return False
def retrieve_from_vault(self):
logger.info(
'Attempting to retrieve the following from the vault: {}'.format(
self.profile_data.get('user_id')
)
)
if not self.dynamodb_table:
self._connect_dynamo_db()
user_key = {'user_id': self.profile_data.get('user_id')}
if user_key['user_id'] is not None:
response = self.dynamodb_table.get_item(Key=user_key)
else:
response = None
self.profile_data = response
return response
def store_in_vault(self):
logger.info(
'Attempting storage of the following user to the vault: {}'.format(
self.profile_data.get('user_id')
)
)
if not self.dynamodb_table:
self._connect_dynamo_db()
response = self.dynamodb_table.put_item(
Item=self.profile_data
)
return (response['ResponseMetadata']['HTTPStatusCode'] is 200)
def _connect_dynamo_db(self):
"""New up a dynamodb resource from boto session."""
dynamodb = self.boto_session.resource('dynamodb')
dynamodb_table = self.config('dynamodb_table', namespace='cis')
self.dynamodb_table = dynamodb.Table(dynamodb_table)
| """First class object to represent a user and data about that user."""
import logging
from cis.settings import get_config
logger = logging.getLogger(__name__)
class Profile(object):
def __init__(self, boto_session=None, profile_data=None):
"""
:param boto_session: The boto session object from the constructor.
:param profile_data: The decrypted user profile JSON.
"""
self.boto_session = boto_session
self.config = get_config()
self.profile_data = profile_data
self.dynamodb_table = None
@property
def exists(self):
if self.retrieve_from_vault() is not None:
return True
else:
return False
def retrieve_from_vault(self):
logger.info(
'Attempting to retrieve the following from the vault: {}'.format(
self.profile_data.get('user_id')
)
)
if not self.dynamodb_table:
self._connect_dynamo_db()
user_key = {'user_id': self.profile_data.get('user_id')}
response = self.dynamodb_table.get_item(Key=user_key)
self.profile_data = response
return response
def store_in_vault(self):
logger.info(
'Attempting storage of the following user to the vault: {}'.format(
self.profile_data.get('user_id')
)
)
if not self.dynamodb_table:
self._connect_dynamo_db()
response = self.dynamodb_table.put_item(
Item=self.profile_data
)
return (response['ResponseMetadata']['HTTPStatusCode'] is 200)
def _connect_dynamo_db(self):
"""New up a dynamodb resource from boto session."""
dynamodb = self.boto_session.resource('dynamodb')
dynamodb_table = self.config('dynamodb_table', namespace='cis')
self.dynamodb_table = dynamodb.Table(dynamodb_table)
| Python | 0.000009 |
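The guard added above avoids calling DynamoDB `get_item` with a null key value for first-time users. A minimal boto3 sketch of the same pattern (table name and region are hypothetical):

```python
import boto3

table = boto3.resource('dynamodb', region_name='us-east-1').Table('cis-vault')

profile = {'user_id': None}            # first-time user: no id assigned yet
user_id = profile.get('user_id')

if user_id is not None:
    response = table.get_item(Key={'user_id': user_id})
else:
    response = None                    # skip the lookup instead of sending a null key
```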
81833470d1eb831e27e9e34712b983efbc38a735 | Convert entire table to cartesian | solar_neighbourhood/prepare_data_add_kinematics.py | solar_neighbourhood/prepare_data_add_kinematics.py | """
Add very large RV errors for stars with no known RVs.
Convert to cartesian.
"""
import numpy as np
import sys
sys.path.insert(0, '..')
from chronostar import tabletool
from astropy.table import Table
datafile = '../data/ScoCen_box_result.fits'
d = Table.read(datafile)
# Set missing radial velocities (nan) to 0
d['radial_velocity'] = np.nan_to_num(d['radial_velocity'])
# Set missing radial velocity errors (nan) to 1e+10
d['radial_velocity_error'][np.isnan(d['radial_velocity_error'])] = 1e+4
print('Convert to cartesian')
tabletool.convert_table_astro2cart(table=d, return_table=True)
d.write('../data/ScoCen_box_result_15M_ready_for_bg_ols.fits')
print('Cartesian written.', len(d)) | """
Add very large RV errors for stars with no known RVs.
Convert to cartesian.
"""
import numpy as np
import sys
sys.path.insert(0, '..')
from chronostar import tabletool
from astropy.table import Table
datafile = '../data/ScoCen_box_result.fits'
d = tabletool.read(datafile)
# Set missing radial velocities (nan) to 0
d['radial_velocity'] = np.nan_to_num(d['radial_velocity'])
# Set missing radial velocity errors (nan) to 1e+10
d['radial_velocity_error'][np.isnan(d['radial_velocity_error'])] = 1e+4
print('Convert to cartesian')
tabletool.convert_table_astro2cart(table=d, return_table=True)
d.write('../data/ScoCen_box_result_15M_ready_for_bg_ols.fits')
print('Cartesian written.', len(d)) | Python | 0.999999 |
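The missing-data convention above on a toy array: unknown radial velocities become 0 km/s with a huge error bar, so they carry essentially no weight in the later cartesian fit.

```python
import numpy as np

rv = np.array([10.3, np.nan, -4.1])
rv_err = np.array([0.5, np.nan, 0.7])

rv = np.nan_to_num(rv)            # nan -> 0.0
rv_err[np.isnan(rv_err)] = 1e+4   # effectively infinite uncertainty
print(rv, rv_err)
```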
0700e25b4dce989fcfc6ee367c7516578c8aaf5b | Update heartbeat in idle times | lava_scheduler_daemon/service.py | lava_scheduler_daemon/service.py | # Copyright (C) 2013 Linaro Limited
#
# Author: Senthil Kumaran <senthil.kumaran@linaro.org>
#
# This file is part of LAVA Scheduler.
#
# LAVA Scheduler is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License version 3 as
# published by the Free Software Foundation
#
# LAVA Scheduler is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>.
import logging
import xmlrpclib
from twisted.application.service import Service
from twisted.internet import defer
from twisted.internet.task import LoopingCall
from lava_scheduler_app import utils
from lava_scheduler_daemon.job import JobRunner, catchall_errback
from lava_scheduler_daemon.worker import WorkerData
class JobQueue(Service):
def __init__(self, source, dispatcher, reactor, daemon_options):
self.logger = logging.getLogger(__name__ + '.JobQueue')
self.source = source
self.dispatcher = dispatcher
self.reactor = reactor
self.daemon_options = daemon_options
self._check_job_call = LoopingCall(self._checkJobs)
self._check_job_call.clock = reactor
def _checkJobs(self):
# Update Worker Heartbeat
#
# NOTE: This will reside here till we finalize scheduler refactoring
# and a separate module for the worker-specific daemon gets created.
self.logger.debug("Worker heartbeat")
worker = WorkerData()
# Record the scheduler tick (timestamp).
worker.record_master_scheduler_tick()
try:
worker.put_heartbeat_data()
except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as err:
worker.logger.error("Heartbeat update failed!")
self.logger.debug("Refreshing jobs")
return self.source.getJobList().addCallback(
self._startJobs).addErrback(catchall_errback(self.logger))
def _startJobs(self, jobs):
for job in jobs:
new_job = JobRunner(self.source, job, self.dispatcher,
self.reactor, self.daemon_options)
self.logger.info("Starting Job: %d " % job.id)
new_job.start()
def startService(self):
self.logger.info("\n\nLAVA Scheduler starting\n\n")
self._check_job_call.start(20)
def stopService(self):
self._check_job_call.stop()
return None
| # Copyright (C) 2013 Linaro Limited
#
# Author: Senthil Kumaran <senthil.kumaran@linaro.org>
#
# This file is part of LAVA Scheduler.
#
# LAVA Scheduler is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License version 3 as
# published by the Free Software Foundation
#
# LAVA Scheduler is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with LAVA Scheduler. If not, see <http://www.gnu.org/licenses/>.
import logging
import xmlrpclib
from twisted.application.service import Service
from twisted.internet import defer
from twisted.internet.task import LoopingCall
from lava_scheduler_app import utils
from lava_scheduler_daemon.job import JobRunner, catchall_errback
from lava_scheduler_daemon.worker import WorkerData
class JobQueue(Service):
def __init__(self, source, dispatcher, reactor, daemon_options):
self.logger = logging.getLogger(__name__ + '.JobQueue')
self.source = source
self.dispatcher = dispatcher
self.reactor = reactor
self.daemon_options = daemon_options
self._check_job_call = LoopingCall(self._checkJobs)
self._check_job_call.clock = reactor
def _checkJobs(self):
self.logger.debug("Refreshing jobs")
return self.source.getJobList().addCallback(
self._startJobs).addErrback(catchall_errback(self.logger))
def _startJobs(self, jobs):
# Update Worker Heartbeat
#
        # NOTE: This will reside here until we finalize scheduler refactoring
        # and a separate module for the worker-specific daemon gets created.
worker = WorkerData()
# Record the scheduler tick (timestamp).
worker.record_master_scheduler_tick()
try:
worker.put_heartbeat_data()
except (xmlrpclib.Fault, xmlrpclib.ProtocolError) as err:
worker.logger.error("Heartbeat update failed!")
for job in jobs:
new_job = JobRunner(self.source, job, self.dispatcher,
self.reactor, self.daemon_options)
self.logger.info("Starting Job: %d " % job.id)
new_job.start()
def startService(self):
self.logger.info("\n\nLAVA Scheduler starting\n\n")
self._check_job_call.start(20)
def stopService(self):
self._check_job_call.stop()
return None
| Python | 0.000003 |
fedb3768539259568555d5a62d503c7995f4b9a2 | Handle orgs that you don’t own personally. | readthedocs/oauth/utils.py | readthedocs/oauth/utils.py | import logging
from .models import GithubProject, GithubOrganization
log = logging.getLogger(__name__)
def make_github_project(user, org, privacy, repo_json):
if (repo_json['private'] is True and privacy == 'private' or
repo_json['private'] is False and privacy == 'public'):
project, created = GithubProject.objects.get_or_create(
full_name=repo_json['full_name'],
)
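        # Refuse to adopt a project that already belongs to another user or organization.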
if project.user != user:
log.debug('Not importing %s because mismatched user' % repo_json['name'])
return None
if project.organization and project.organization != org:
log.debug('Not importing %s because mismatched orgs' % repo_json['name'])
return None
        project.organization = org
project.name = repo_json['name']
project.description = repo_json['description']
project.git_url = repo_json['git_url']
project.ssh_url = repo_json['ssh_url']
project.html_url = repo_json['html_url']
project.json = repo_json
project.save()
return project
else:
log.debug('Not importing %s because mismatched type' % repo_json['name'])
def make_github_organization(user, org_json):
org, created = GithubOrganization.objects.get_or_create(
login=org_json.get('login'),
html_url=org_json.get('html_url'),
name=org_json.get('name'),
email=org_json.get('email'),
json=org_json,
)
org.users.add(user)
return org
| import logging
from .models import GithubProject, GithubOrganization
log = logging.getLogger(__name__)
def make_github_project(user, org, privacy, repo_json):
if (repo_json['private'] is True and privacy == 'private' or
repo_json['private'] is False and privacy == 'public'):
project, created = GithubProject.objects.get_or_create(
user=user,
organization=org,
full_name=repo_json['full_name'],
)
project.name = repo_json['name']
project.description = repo_json['description']
project.git_url = repo_json['git_url']
project.ssh_url = repo_json['ssh_url']
project.html_url = repo_json['html_url']
project.json = repo_json
project.save()
return project
else:
log.debug('Not importing %s because mismatched type' % repo_json['name'])
def make_github_organization(user, org_json):
org, created = GithubOrganization.objects.get_or_create(
login=org_json.get('login'),
html_url=org_json.get('html_url'),
name=org_json.get('name'),
email=org_json.get('email'),
json=org_json,
)
org.users.add(user)
return org
| Python | 0 |
7755c117e354871bbc06c98d0709545cee2032ba | Add versioning managers | share/models/base.py | share/models/base.py | import uuid
import inspect
from django.db import models
from django.conf import settings
from django.db import transaction
from django.db.models.base import ModelBase
from django.db.models.fields.related import lazy_related_operation
from share.models.core import RawData
from share.models.core import ShareUser
class AbstractShareObject(models.Model):
# id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
source = models.ForeignKey(ShareUser)
source_data = models.ForeignKey(RawData, blank=True, null=True) # NULL/None indicates a user submitted change
changed_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
abstract = True
class ShareObjectVersion(models.Model):
    action = models.CharField(max_length=10)
persistant_id = models.PositiveIntegerField() # Must match the id of ShareObject
class Meta:
abstract = True
ordering = ('-changed_at', )
class ShareForeignKey(models.ForeignKey):
def __init__(self, model, **kwargs):
self.__kwargs = kwargs
super().__init__(model, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
actual = self.__class__.mro()[1](self.remote_field.model, **self.__kwargs)
actual.contribute_to_class(cls, name, **kwargs)
self.__kwargs['editable'] = False
version = self.__class__.mro()[1](self.remote_field.model.VersionModel, **self.__kwargs)
version.contribute_to_class(cls, name + '_version', **kwargs)
class ShareManyToMany(models.ManyToManyField):
def __init__(self, model, **kwargs):
self.__kwargs = kwargs
super().__init__(model, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
actual = self.__class__.mro()[1](self.remote_field.model, **self.__kwargs)
actual.contribute_to_class(cls, name, **kwargs)
self.__kwargs['through'] += 'Version'
self.__kwargs['editable'] = False
version = self.__class__.mro()[1](self.remote_field.model.VersionModel, **self.__kwargs)
version.contribute_to_class(cls, name[:-1] + '_versions', **kwargs)
class ShareObjectMeta(ModelBase):
def __new__(cls, name, bases, attrs):
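        # Leave plain models and multi-base classes untouched; only direct
        # ShareObject subclasses get the abstract/version/concrete rewrite below.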
if models.Model in bases or len(bases) > 1:
return super(ShareObjectMeta, cls).__new__(cls, name, bases, attrs)
module = attrs['__module__']
if not attrs.get('Meta'):
attrs['Meta'] = type('Meta', (object, ), {})
attrs['Meta'].abstract = True
attrs['__qualname__'] = 'Abstract' + attrs['__qualname__']
abstract = super(ShareObjectMeta, cls).__new__(cls, 'Abstract' + name, (AbstractShareObject, ), attrs)
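        # Build a parallel <Name>Version model that shares the abstract base's fields.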
version = type(
name + 'Version',
(abstract, ShareObjectVersion),
{'__module__': module}
)
concrete = super(ShareObjectMeta, cls).__new__(cls, name, (abstract, ShareObject), {
'__module__': module,
'VersionModel': version,
'version': models.OneToOneField(version, on_delete=models.PROTECT, related_name='%(app_label)s_%(class)s_version')
})
inspect.stack()[1].frame.f_globals.update({concrete.VersionModel.__name__: concrete.VersionModel})
return concrete
class VersionManagerDescriptor:
def __init__(self, model):
self.model = model
def __get__(self, instance, type=None):
        if instance is not None:
            return VersionManager(self.model, instance)
        return VersionManager(self.model)
class VersionManager(models.Manager):
def __init__(self, model=None, instance=None):
super().__init__()
self.model = model
self.instance = instance
def get_queryset(self):
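        # Newest first; a manager bound to an instance only sees that object's history.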
qs = self._queryset_class(model=self.model.VersionModel, using=self._db, hints=self._hints).order_by('-changed_at')
if self.instance:
return qs.filter(persistant_id=self.instance.id)
return qs
def contribute_to_class(self, model, name):
super().contribute_to_class(model, name)
if not model._meta.abstract:
setattr(model, name, VersionManagerDescriptor(model))
class ShareObject(models.Model, metaclass=ShareObjectMeta):
id = models.AutoField(primary_key=True)
objects = models.Manager()
versions = VersionManager()
class Meta:
abstract = True
| import uuid
import inspect
from django.db import models
from django.conf import settings
from django.db import transaction
from django.db.models.base import ModelBase
from django.db.models.fields.related import lazy_related_operation
from share.models.core import RawData
from share.models.core import ShareUser
class AbstractShareObject(models.Model):
# id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
source = models.ForeignKey(ShareUser)
source_data = models.ForeignKey(RawData, blank=True, null=True) # NULL/None indicates a user submitted change
changed_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class ShareObjectVersion(models.Model):
    action = models.CharField(max_length=10)
persistant_id = models.PositiveIntegerField() # Must match the id of ShareObject
class Meta:
abstract = True
class ShareForeignKey(models.ForeignKey):
def __init__(self, model, **kwargs):
self.__kwargs = kwargs
super().__init__(model, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
actual = self.__class__.mro()[1](self.remote_field.model, **self.__kwargs)
actual.contribute_to_class(cls, name, **kwargs)
self.__kwargs['editable'] = False
version = self.__class__.mro()[1](self.remote_field.model.VersionModel, **self.__kwargs)
version.contribute_to_class(cls, name + '_version', **kwargs)
class ShareManyToMany(models.ManyToManyField):
def __init__(self, model, **kwargs):
self.__kwargs = kwargs
super().__init__(model, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
actual = self.__class__.mro()[1](self.remote_field.model, **self.__kwargs)
actual.contribute_to_class(cls, name, **kwargs)
self.__kwargs['through'] += 'Version'
self.__kwargs['editable'] = False
version = self.__class__.mro()[1](self.remote_field.model.VersionModel, **self.__kwargs)
version.contribute_to_class(cls, name[:-1] + '_versions', **kwargs)
class ShareObjectMeta(ModelBase):
def __new__(cls, name, bases, attrs):
if models.Model in bases or len(bases) > 1:
return super(ShareObjectMeta, cls).__new__(cls, name, bases, attrs)
module = attrs['__module__']
if not attrs.get('Meta'):
attrs['Meta'] = type('Meta', (object, ), {})
attrs['Meta'].abstract = True
attrs['__qualname__'] = 'Abstract' + attrs['__qualname__']
abstract = super(ShareObjectMeta, cls).__new__(cls, 'Abstract' + name, (AbstractShareObject, ), attrs)
version = type(
name + 'Version',
(abstract, ShareObjectVersion),
{'__module__': module}
)
concrete = super(ShareObjectMeta, cls).__new__(cls, name, (abstract, ShareObject), {
'__module__': module,
'version': models.OneToOneField(version, on_delete=models.PROTECT, related_name='%(app_label)s_%(class)s_version')
})
concrete.VersionModel = version
inspect.stack()[1].frame.f_globals.update({concrete.VersionModel.__name__: concrete.VersionModel})
return concrete
class ShareObject(models.Model, metaclass=ShareObjectMeta):
id = models.AutoField(primary_key=True)
class Meta:
abstract = True
| Python | 0 |
bc196c74b3959577f7254d1d5434aeb23d284eea | Convert tabs to spaces | RecordingApp/app/src/scripts/getChunks.py | RecordingApp/app/src/scripts/getChunks.py | #Script to generate a json file containing book name, number of chapters, number of chunks
import json
import urllib.request
import re
result_json_name = "chunks.json"
with open("catalog.json") as file:
data = json.load(file)
output = []
#skip obs for now, loop over all books
for x in range(1, 67):
#gives book name and order (the books are stored out of order in the json)
slug = data[x]["slug"]
sort = data[x]["sort"]
#Get languages.json
url_lang_cat = data[x]["lang_catalog"]
response_lang_cat = urllib.request.urlopen(url_lang_cat)
lang_catalog = json.loads(response_lang_cat.read().decode('utf-8'))
name = lang_catalog[0]["project"]["name"]
#Get resources.json
#0 is for udb, are chunks the same for both?
url_res = lang_catalog[0]["res_catalog"]
response_res = urllib.request.urlopen(url_res)
res_cat = json.loads(response_res.read().decode('utf-8'))
#Get the usfm file
url_usfm = res_cat[0]["usfm"]
response_usfm = urllib.request.urlopen(url_usfm)
usfm_data = response_usfm.read().decode('utf-8')
lines = usfm_data.splitlines()
#keep a count of \c and \s5 tags (chapter and chunk respectively)
chapter = 0
num_chunks = 0
chunk_list = []
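    #counts land one chapter late because \s5 markers precede their \c tag; corrected below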
for line in lines:
chunk_match = re.search(r'\\s5', line)
#add to the number of chunks seen so far
if chunk_match:
num_chunks += 1
#on a new chapter, append the number of chunks tallied and reset the count
chapter_match = re.search(r'\\c', line)
if chapter_match:
chunk_list.append(num_chunks)
num_chunks = 0
chapter += 1
#append the last chapter
chunk_list.append(num_chunks+1)
#Account for the off by one introduced from chunks coming before chapters
chunk_list_fixed = []
length = len(chunk_list)-1
#eliminate chapter "0"
for i in range(length):
chunk_list_fixed.append(chunk_list[i+1])
#create a dictionary to store the book's data
book = {}
book['slug'] = slug
book['name'] = name
book['sort'] = sort
book['chapters'] = len(chunk_list_fixed)
book['chunks'] = chunk_list_fixed
#add to the list of books
output.append(book)
#output all book data to a json file
with open(result_json_name, 'w') as outfile:
json.dump(output, outfile)
| #Script to generate a json file containing book name, number of chapters, number of chunks
import json
import urllib.request
import re
result_json_name = "chunks.json"
with open("catalog.json") as file:
data = json.load(file)
output = []
#skip obs for now, loop over all books
for x in range(1, 67):
#gives book name and order (the books are stored out of order in the json)
slug = data[x]["slug"]
sort = data[x]["sort"]
#Get languages.json
url_lang_cat = data[x]["lang_catalog"]
response_lang_cat = urllib.request.urlopen(url_lang_cat)
lang_catalog = json.loads(response_lang_cat.read().decode('utf-8'))
name = lang_catalog[0]["project"]["name"]
#Get resources.json
#0 is for udb, are chunks the same for both?
url_res = lang_catalog[0]["res_catalog"]
response_res = urllib.request.urlopen(url_res)
res_cat = json.loads(response_res.read().decode('utf-8'))
#Get the usfm file
url_usfm = res_cat[0]["usfm"]
response_usfm = urllib.request.urlopen(url_usfm)
usfm_data = response_usfm.read().decode('utf-8')
lines = usfm_data.splitlines()
#keep a count of \c and \s5 tags (chapter and chunk respectively)
chapter = 0
num_chunks = 0
chunk_list = []
for line in lines:
chunk_match = re.search(r'\\s5', line)
#add to the number of chunks seen so far
if chunk_match:
num_chunks += 1
#on a new chapter, append the number of chunks tallied and reset the count
chapter_match = re.search(r'\\c', line)
if chapter_match:
chunk_list.append(num_chunks)
num_chunks = 0
chapter += 1
#append the last chapter
chunk_list.append(num_chunks+1)
#Account for the off by one introduced from chunks coming before chapters
chunk_list_fixed = []
length = len(chunk_list)-1
#eliminate chapter "0"
for i in range(length):
chunk_list_fixed.append(chunk_list[i+1])
#create a dictionary to store the book's data
book = {}
book['slug'] = slug
book['name'] = name
book['sort'] = sort
book['chapters'] = len(chunk_list_fixed)
book['chunks'] = chunk_list_fixed
#add to the list of books
output.append(book)
#output all book data to a json file
with open(result_json_name, 'w') as outfile:
json.dump(output, outfile)
| Python | 0.999999 |
bc4a37e3a93a68fa76c47bd355e1b028f0ca6c60 | fix skynetqa url, add timeout and more graceful error handling | daft/scorebig/action.py | daft/scorebig/action.py | __author__ = 'bkeroack'
import logging
import ec2
import salt.client
import requests
import re
class QualType:
Corp = 0
EC2 = 1
CorpMaster = 'laxqualmaster'
EC2Master = 'qualmaster001'
class QualTypeUnknown(Exception):
pass
class UploadBuildAction:
def __init__(self, **kwargs):
#pattern match for prod Skynet registration
self.rx = re.compile("^[0-9]{1,8}-(master|develop|sprint).*")
self.build_name = kwargs['build_name']
def go(self):
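        # Register release-style build names with prod Skynet first; the QA
        # registration still runs unless that prod attempt failed.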
ret = True
if self.rx.match(self.build_name):
logging.debug("UploadBuildAction: regexp matches: {}".format(self.build_name))
ret = RegisterBuildSkynet(self.build_name).register()
return ret and RegisterBuildSkynetQA(self.build_name).register()
class RegisterBuild:
def __init__(self, url, build_name):
self.url = url
self.build_name = build_name
def register(self):
url = self.url.format(self.build_name)
logging.debug("ScoreBig: Action: {}: url: {}".format(self.__class__.__name__, url))
try:
r = requests.post(url, timeout=45)
except requests.ConnectionError:
return False
except requests.Timeout:
return False
resp = str(r.text).encode('ascii', 'ignore')
logging.debug("ScoreBig: Action: {}: response: {}".format(self.__class__.__name__, resp))
logging.debug("ScoreBIg: Action: {}: encoding: {}".format(self.__class__.__name__, r.encoding))
return "ok" in resp
class RegisterBuildSkynet(RegisterBuild):
def __init__(self, build_name):
url = "http://skynet.scorebiginc.com/Hacks/RegisterBuild?buildNumber={}"
RegisterBuild.__init__(self, url, build_name)
class RegisterBuildSkynetQA(RegisterBuild):
def __init__(self, build_name):
url = "http://skynetqa.scorebiginc.com/DevQaTools/RegisterBuild?buildNumber={}"
#url = "http://laxsky001/DevQaTools/RegisterBuild?buildNumber={}"
RegisterBuild.__init__(self, url, build_name)
class ProvisionQual:
def __init__(self, qual_type, build):
self.qual_type = qual_type
self.build = build
assert self.build.__class__.__name__ == 'Build'
def start(self):
self.verify_master_server()
self.copy_build()
self.shut_down_master()
self.take_snapshot()
self.create_qual()
self.setup_qual()
self.setup_dns()
# generic master server:
# - verify up
# - copy build over
# - (shut down - optional?)
# generic snapshot:
# - take snapshot of build volume
# generic new server:
# - create new server based on generic server image (ami/vhd)
# loop on creation until up
# generic uptime tasks:
# - IIS bindings
# - service start
# - bootstrapping
# generic DNS
# - create DNS A record
class SaltClient:
def __init__(self):
self.client = salt.client.LocalClient()
def powershell(self, target, cmd, target_type='glob', timeout=120):
return self.client.cmd(target, 'cmd.run', cmd, timeout=timeout, expr_form=target_type, shell='powershell')
def cmd(self, target, cmd, args, target_type='glob', timeout=60):
return self.client.cmd(target, cmd, args, timeout=timeout, expr_form=target_type)
def ping(self, target):
return | __author__ = 'bkeroack'
import logging
import ec2
import salt.client
import requests
import re
class QualType:
Corp = 0
EC2 = 1
CorpMaster = 'laxqualmaster'
EC2Master = 'qualmaster001'
class QualTypeUnknown(Exception):
pass
class UploadBuildAction:
def __init__(self, **kwargs):
#pattern match for prod Skynet registration
self.rx = re.compile("^[0-9]{1,8}-(master|develop|sprint).*")
self.build_name = kwargs['build_name']
def go(self):
ret = True
if self.rx.match(self.build_name):
logging.debug("UploadBuildAction: regexp matches: {}".format(self.build_name))
ret = RegisterBuildSkynet(self.build_name).register()
return ret and RegisterBuildSkynetQA(self.build_name).register()
class RegisterBuild:
def __init__(self, url, build_name):
self.url = url
self.build_name = build_name
def register(self):
url = self.url.format(self.build_name)
logging.debug("ScoreBig: Action: {}: url: {}".format(self.__class__.__name__, url))
r = requests.post(url)
resp = str(r.text).encode('ascii', 'ignore')
logging.debug("ScoreBig: Action: {}: response: {}".format(self.__class__.__name__, resp))
logging.debug("ScoreBIg: Action: {}: encoding: {}".format(self.__class__.__name__, r.encoding))
return "ok" in resp
class RegisterBuildSkynet(RegisterBuild):
def __init__(self, build_name):
url = "http://skynet.scorebiginc.com/Hacks/RegisterBuild?buildNumber={}"
RegisterBuild.__init__(self, url, build_name)
class RegisterBuildSkynetQA(RegisterBuild):
def __init__(self, build_name):
#url = "http://skynetqa.scorebiginc.com/DevQaTools/RegisterBuild?buildNumber={}"
url = "http://laxsky001/DevQaTools/RegisterBuild?buildNumber={}"
RegisterBuild.__init__(self, url, build_name)
class ProvisionQual:
def __init__(self, qual_type, build):
self.qual_type = qual_type
self.build = build
assert self.build.__class__.__name__ == 'Build'
def start(self):
self.verify_master_server()
self.copy_build()
self.shut_down_master()
self.take_snapshot()
self.create_qual()
self.setup_qual()
self.setup_dns()
# generic master server:
# - verify up
# - copy build over
# - (shut down - optional?)
# generic snapshot:
# - take snapshot of build volume
# generic new server:
# - create new server based on generic server image (ami/vhd)
# loop on creation until up
# generic uptime tasks:
# - IIS bindings
# - service start
# - bootstrapping
# generic DNS
# - create DNS A record
class SaltClient:
def __init__(self):
self.client = salt.client.LocalClient()
def powershell(self, target, cmd, target_type='glob', timeout=120):
return self.client.cmd(target, 'cmd.run', cmd, timeout=timeout, expr_form=target_type, shell='powershell')
def cmd(self, target, cmd, args, target_type='glob', timeout=60):
return self.client.cmd(target, cmd, args, timeout=timeout, expr_form=target_type)
def ping(self, target):
return | Python | 0 |
588fb283cc82be28d3fb28bb6a8896c42d1a7eee | Fix the CONF_LOOP check to use the config (#38890) | homeassistant/components/environment_canada/camera.py | homeassistant/components/environment_canada/camera.py | """Support for the Environment Canada radar imagery."""
import datetime
import logging
from env_canada import ECRadar
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_UPDATED = "updated"
CONF_ATTRIBUTION = "Data provided by Environment Canada"
CONF_STATION = "station"
CONF_LOOP = "loop"
CONF_PRECIP_TYPE = "precip_type"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_LOOP, default=True): cv.boolean,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): cv.matches_regex(r"^C[A-Z]{4}$|^[A-Z]{3}$"),
vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
vol.Optional(CONF_PRECIP_TYPE): ["RAIN", "SNOW"],
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Environment Canada camera."""
if config.get(CONF_STATION):
radar_object = ECRadar(
station_id=config[CONF_STATION], precip_type=config.get(CONF_PRECIP_TYPE)
)
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
radar_object = ECRadar(
coordinates=(lat, lon), precip_type=config.get(CONF_PRECIP_TYPE)
)
add_devices(
[ECCamera(radar_object, config.get(CONF_NAME), config[CONF_LOOP])], True
)
class ECCamera(Camera):
"""Implementation of an Environment Canada radar camera."""
def __init__(self, radar_object, camera_name, is_loop):
"""Initialize the camera."""
super().__init__()
self.radar_object = radar_object
self.camera_name = camera_name
self.is_loop = is_loop
self.content_type = "image/gif"
self.image = None
self.timestamp = None
def camera_image(self):
"""Return bytes of camera image."""
self.update()
return self.image
@property
def name(self):
"""Return the name of the camera."""
if self.camera_name is not None:
return self.camera_name
return "Environment Canada Radar"
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {ATTR_ATTRIBUTION: CONF_ATTRIBUTION, ATTR_UPDATED: self.timestamp}
return attr
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update radar image."""
if self.is_loop:
self.image = self.radar_object.get_loop()
else:
self.image = self.radar_object.get_latest_frame()
self.timestamp = self.radar_object.timestamp
| """Support for the Environment Canada radar imagery."""
import datetime
import logging
from env_canada import ECRadar
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_UPDATED = "updated"
CONF_ATTRIBUTION = "Data provided by Environment Canada"
CONF_STATION = "station"
CONF_LOOP = "loop"
CONF_PRECIP_TYPE = "precip_type"
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(minutes=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_LOOP, default=True): cv.boolean,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): cv.matches_regex(r"^C[A-Z]{4}$|^[A-Z]{3}$"),
vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
vol.Optional(CONF_PRECIP_TYPE): ["RAIN", "SNOW"],
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Environment Canada camera."""
if config.get(CONF_STATION):
radar_object = ECRadar(
station_id=config[CONF_STATION], precip_type=config.get(CONF_PRECIP_TYPE)
)
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
radar_object = ECRadar(
coordinates=(lat, lon), precip_type=config.get(CONF_PRECIP_TYPE)
)
add_devices([ECCamera(radar_object, config.get(CONF_NAME))], True)
class ECCamera(Camera):
"""Implementation of an Environment Canada radar camera."""
def __init__(self, radar_object, camera_name):
"""Initialize the camera."""
super().__init__()
self.radar_object = radar_object
self.camera_name = camera_name
self.content_type = "image/gif"
self.image = None
self.timestamp = None
def camera_image(self):
"""Return bytes of camera image."""
self.update()
return self.image
@property
def name(self):
"""Return the name of the camera."""
if self.camera_name is not None:
return self.camera_name
return "Environment Canada Radar"
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {ATTR_ATTRIBUTION: CONF_ATTRIBUTION, ATTR_UPDATED: self.timestamp}
return attr
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update radar image."""
if CONF_LOOP:
self.image = self.radar_object.get_loop()
else:
self.image = self.radar_object.get_latest_frame()
self.timestamp = self.radar_object.timestamp
| Python | 0 |
c96cccbe7afc282aedbb316a2e9e41e47e68bcb6 | fix efs lvm create (#610) | chroma-manager/tests/integration/utils/test_blockdevices/test_blockdevice_lvm.py | chroma-manager/tests/integration/utils/test_blockdevices/test_blockdevice_lvm.py | # Copyright (c) 2017 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import re
from tests.integration.utils.test_blockdevices.test_blockdevice import TestBlockDevice
class TestBlockDeviceLvm(TestBlockDevice):
_supported_device_types = ['lvm']
def __init__(self, device_type, device_path):
super(TestBlockDeviceLvm, self).__init__(device_type, device_path)
@property
def preferred_fstype(self):
return 'ldiskfs'
# Create a lvm on the device.
@property
def prepare_device_commands(self):
# FIXME: the use of --yes in the {vg,lv}create commands is a work-around for #500
# and should be reverted when #500 is fixed
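        # Wipe stale filesystem signatures first so vgcreate starts from a clean device.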
return [
"wipefs -a {}".format(self._device_path),
"vgcreate --yes %s %s; lvcreate --yes --wipesignatures n -l 100%%FREE --name %s %s"
% (self.vg_name, self._device_path, self.lv_name, self.vg_name)
]
@property
def vg_name(self):
return "vg_%s" % "".join(
[c for c in self._device_path if re.match(r'\w', c)])
@property
def lv_name(self):
return "lv_%s" % "".join(
[c for c in self._device_path if re.match(r'\w', c)])
@property
def device_path(self):
return "/dev/%s/%s" % (self.vg_name, self.lv_name)
@classmethod
def clear_device_commands(cls, device_paths):
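        # Deactivate and remove LVs before their VGs; each command no-ops when the target is absent.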
lv_destroy = [
"if lvdisplay /dev/{0}/{1}; then lvchange -an /dev/{0}/{1} && lvremove /dev/{0}/{1}; else exit 0; fi".
format(
TestBlockDeviceLvm('lvm', device_path).vg_name,
TestBlockDeviceLvm('lvm', device_path).lv_name)
for device_path in device_paths
]
vg_destroy = [
"if vgdisplay {0}; then vgremove {0}; else exit 0; fi".format(
TestBlockDeviceLvm('lvm', device_path).vg_name)
for device_path in device_paths
]
return lv_destroy + vg_destroy
@property
def install_packages_commands(self):
return []
| # Copyright (c) 2017 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import re
from tests.integration.utils.test_blockdevices.test_blockdevice import TestBlockDevice
class TestBlockDeviceLvm(TestBlockDevice):
_supported_device_types = ['lvm']
def __init__(self, device_type, device_path):
super(TestBlockDeviceLvm, self).__init__(device_type, device_path)
@property
def preferred_fstype(self):
return 'ldiskfs'
# Create a lvm on the device.
@property
def prepare_device_commands(self):
# FIXME: the use of --yes in the {vg,lv}create commands is a work-around for #500
# and should be reverted when #500 is fixed
return [
"vgcreate --yes %s %s; lvcreate --yes --wipesignatures n -l 100%%FREE --name %s %s"
% (self.vg_name, self._device_path, self.lv_name, self.vg_name)
]
@property
def vg_name(self):
return "vg_%s" % "".join(
[c for c in self._device_path if re.match(r'\w', c)])
@property
def lv_name(self):
return "lv_%s" % "".join(
[c for c in self._device_path if re.match(r'\w', c)])
@property
def device_path(self):
return "/dev/%s/%s" % (self.vg_name, self.lv_name)
@classmethod
def clear_device_commands(cls, device_paths):
lv_destroy = [
"if lvdisplay /dev/{0}/{1}; then lvchange -an /dev/{0}/{1} && lvremove /dev/{0}/{1}; else exit 0; fi".
format(
TestBlockDeviceLvm('lvm', device_path).vg_name,
TestBlockDeviceLvm('lvm', device_path).lv_name)
for device_path in device_paths
]
vg_destroy = [
"if vgdisplay {0}; then vgremove {0}; else exit 0; fi".format(
TestBlockDeviceLvm('lvm', device_path).vg_name)
for device_path in device_paths
]
return lv_destroy + vg_destroy
@property
def install_packages_commands(self):
return []
| Python | 0 |
d0e75c65505713a5f044d67a08e6697c4e332611 | Add djangobower and update static settings | darts/darts/settings.py | darts/darts/settings.py | """
Django settings for darts project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yg&r*1k#$nak&g*9ay6zh!+@*=f=ids5u10a!!r^yjvltw0&8='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'djangobower'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'darts.urls'
WSGI_APPLICATION = 'darts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'assets'),
)
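# BowerFinder exposes packages installed under BOWER_COMPONENTS_ROOT to collectstatic.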
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Djangobower
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'components')
BOWER_INSTALLED_APPS = (
'jquery',
'bootstrap',
) | """
Django settings for darts project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yg&r*1k#$nak&g*9ay6zh!+@*=f=ids5u10a!!r^yjvltw0&8='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'darts.urls'
WSGI_APPLICATION = 'darts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| Python | 0 |
5273e0fcdf2b7f1b03301cb0834b07da82064b98 | Remove trailing \n | mailproc/vidmaster.py | mailproc/vidmaster.py
# -*- coding: utf-8 -*-
#
# Zoe vidmaster - https://github.com/rmed/zoe-vidmaster
#
# Copyright (c) 2015 Rafael Medina García <rafamedgar@gmail.com>
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--mail-subject', dest='subject')
parser.add_argument('--msg-sender-alias', dest='sender')
parser.add_argument('--application/octet-stream', dest='script')
if __name__ == '__main__':
args, unknown = parser.parse_known_args()
if args.subject != "vidmaster":
sys.exit(0)
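    # Emit the Zoe message that hands the script off to the vidmaster agent.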
print("message dst=vidmaster&tag=compose&script=%s&sender=%s" % (
args.script, args.sender))
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Zoe vidmaster - https://github.com/rmed/zoe-vidmaster
#
# Copyright (c) 2015 Rafael Medina García <rafamedgar@gmail.com>
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--mail-subject', dest='subject')
parser.add_argument('--msg-sender-alias', dest='sender')
parser.add_argument('--application/octet-stream', dest='script')
if __name__ == '__main__':
args, unknown = parser.parse_known_args()
if args.subject != "vidmaster":
sys.exit(0)
print("message dst=vidmaster&tag=compose&script=%s&sender=%s\n" % (
args.script, args.sender))
| Python | 0.000017 |
a7a1a83bf0f6546b1e985b7c4611b5b83df25853 | Add python's version in breakpad stack traces | breakpad.py | breakpad.py | # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Breakpad for Python.
Sends a notification when a process stops on an exception.
It is only enabled when all these conditions are met:
1. hostname finishes with '.google.com'
2. main module name doesn't contain the word 'test'
3. no NO_BREAKPAD environment variable is defined
"""
import atexit
import getpass
import os
import urllib
import traceback
import socket
import sys
# Configure these values.
DEFAULT_URL = 'https://chromium-status.appspot.com/breakpad'
_REGISTERED = False
def FormatException(e):
"""Returns a human readable form of an exception.
  Adds as much interesting information as can safely be extracted."""
try:
out = repr(e)
except Exception:
out = ''
try:
out = str(e)
if isinstance(e, Exception):
# urllib exceptions, usually the HTTP headers.
if hasattr(e, 'headers'):
out += '\nHeaders: %s' % e.headers
if hasattr(e, 'url'):
out += '\nUrl: %s' % e.url
if hasattr(e, 'msg'):
out += '\nMsg: %s' % e.msg
# The web page in some urllib exceptions.
if hasattr(e, 'read') and callable(e.read):
out += '\nread(): %s' % e.read()
if hasattr(e, 'info') and callable(e.info):
out += '\ninfo(): %s' % e.info()
except Exception:
pass
return out
def SendStack(last_tb, stack, url=None):
"""Sends the stack trace to the breakpad server."""
if not url:
url = DEFAULT_URL
print 'Sending crash report ...'
try:
params = {
'args': sys.argv,
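        # Truncate the stack so a huge trace can't bloat the report.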
'stack': stack[0:4096],
'user': getpass.getuser(),
'exception': FormatException(last_tb),
'host': socket.getfqdn(),
'cwd': os.getcwd(),
'version': sys.version,
}
# pylint: disable=W0702
print('\n'.join(' %s: %s' % (k, v[0:50]) for k, v in params.iteritems()))
request = urllib.urlopen(url, urllib.urlencode(params))
print(request.read())
request.close()
except IOError:
print('There was a failure while trying to send the stack trace. Too bad.')
def CheckForException():
"""Runs at exit. Look if there was an exception active."""
last_value = getattr(sys, 'last_value', None)
if last_value and not isinstance(last_value, KeyboardInterrupt):
last_tb = getattr(sys, 'last_traceback', None)
if last_tb:
SendStack(last_value, ''.join(traceback.format_tb(last_tb)))
def Register():
"""Registers the callback at exit. Calling it multiple times is no-op."""
global _REGISTERED
if _REGISTERED:
return
_REGISTERED = True
atexit.register(CheckForException)
# Skip unit tests; we don't want reports from anyone outside Google.
if (not 'test' in sys.modules['__main__'].__file__ and
not 'NO_BREAKPAD' in os.environ and
(socket.getfqdn().endswith('.google.com') or
socket.getfqdn().endswith('.chromium.org'))):
Register()
# Uncomment this line if you want to test it out.
#Register()
| # Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Breakpad for Python.
Sends a notification when a process stops on an exception.
It is only enabled when all these conditions are met:
1. hostname finishes with '.google.com'
2. main module name doesn't contain the word 'test'
3. no NO_BREAKPAD environment variable is defined
"""
import atexit
import getpass
import os
import urllib
import traceback
import socket
import sys
# Configure these values.
DEFAULT_URL = 'https://chromium-status.appspot.com/breakpad'
_REGISTERED = False
def FormatException(e):
"""Returns a human readable form of an exception.
  Adds as much interesting information as can safely be extracted."""
try:
out = repr(e)
except Exception:
out = ''
try:
out = str(e)
if isinstance(e, Exception):
# urllib exceptions, usually the HTTP headers.
if hasattr(e, 'headers'):
out += '\nHeaders: %s' % e.headers
if hasattr(e, 'url'):
out += '\nUrl: %s' % e.url
if hasattr(e, 'msg'):
out += '\nMsg: %s' % e.msg
# The web page in some urllib exceptions.
if hasattr(e, 'read') and callable(e.read):
out += '\nread(): %s' % e.read()
if hasattr(e, 'info') and callable(e.info):
out += '\ninfo(): %s' % e.info()
except Exception:
pass
return out
def SendStack(last_tb, stack, url=None):
"""Sends the stack trace to the breakpad server."""
if not url:
url = DEFAULT_URL
print 'Sending crash report ...'
try:
params = {
'args': sys.argv,
'stack': stack[0:4096],
'user': getpass.getuser(),
'exception': FormatException(last_tb),
'host': socket.getfqdn(),
'cwd': os.getcwd(),
}
# pylint: disable=W0702
print('\n'.join(' %s: %s' % (k, v[0:50]) for k, v in params.iteritems()))
request = urllib.urlopen(url, urllib.urlencode(params))
print(request.read())
request.close()
except IOError:
print('There was a failure while trying to send the stack trace. Too bad.')
def CheckForException():
"""Runs at exit. Look if there was an exception active."""
last_value = getattr(sys, 'last_value', None)
if last_value and not isinstance(last_value, KeyboardInterrupt):
last_tb = getattr(sys, 'last_traceback', None)
if last_tb:
SendStack(last_value, ''.join(traceback.format_tb(last_tb)))
def Register():
"""Registers the callback at exit. Calling it multiple times is no-op."""
global _REGISTERED
if _REGISTERED:
return
_REGISTERED = True
atexit.register(CheckForException)
# Skip unit tests; we don't want reports from anyone outside Google.
if (not 'test' in sys.modules['__main__'].__file__ and
not 'NO_BREAKPAD' in os.environ and
(socket.getfqdn().endswith('.google.com') or
socket.getfqdn().endswith('.chromium.org'))):
Register()
# Uncomment this line if you want to test it out.
#Register()
| Python | 0.000057 |
95d9c3ecd9a8c2aa73fd91ffdf40a55fee541dd3 | Enable flatpages without middleware. | suorganizer/urls.py | suorganizer/urls.py | """suorganizer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.flatpages import \
urls as flatpage_urls
from blog import urls as blog_urls
from contact import urls as contact_urls
from organizer.urls import (
newslink as newslink_urls,
startup as startup_urls, tag as tag_urls)
from .views import redirect_root
urlpatterns = [
url(r'^$', redirect_root),
url(r'^admin/', include(admin.site.urls)),
url(r'^blog/', include(blog_urls)),
url(r'^contact/', include(contact_urls)),
url(r'^newslink/', include(newslink_urls)),
url(r'^startup/', include(startup_urls)),
url(r'^tag/', include(tag_urls)),
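    # The flatpages catch-all matches everything, so it must stay last.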
url(r'^', include(flatpage_urls)),
]
| """suorganizer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from blog import urls as blog_urls
from contact import urls as contact_urls
from organizer.urls import (
newslink as newslink_urls,
startup as startup_urls, tag as tag_urls)
from .views import redirect_root
urlpatterns = [
url(r'^$', redirect_root),
url(r'^admin/', include(admin.site.urls)),
url(r'^blog/', include(blog_urls)),
url(r'^contact/', include(contact_urls)),
url(r'^newslink/', include(newslink_urls)),
url(r'^startup/', include(startup_urls)),
url(r'^tag/', include(tag_urls)),
]
| Python | 0 |
411751a10ece8b84bb122422b8d58f22710731aa | Fix typo | relayer/flask/logging_middleware.py | relayer/flask/logging_middleware.py | from datetime import datetime
class LoggingMiddleware(object):
def __init__(self, app, wsgi_app, context, logging_topic):
self.app = app
self.wsgi_app = wsgi_app
self.context = context
self.logging_topic = logging_topic
def __call__(self, environ, start_response):
with self.app.app_context():
start_time = datetime.utcnow()
status_code = None
content_length = None
self.context.start_request()
def logging_start_response(status, response_headers, exc_info=None):
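                # Wrap start_response to capture the status code and declared Content-Length.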
nonlocal status_code, content_length
status_code = int(status.partition(' ')[0])
for name, value in response_headers:
if name.lower() == 'content-length':
content_length = int(value)
break
return start_response(status, response_headers, exc_info)
response = self.wsgi_app(environ, logging_start_response)
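            # No Content-Length header: measure the (buffered) body instead.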
if content_length is None:
content_length = len(b''.join(response))
elapsed_time = datetime.utcnow() - start_time
elapsed_time_milliseconds = elapsed_time.microseconds / 1000.0 + elapsed_time.seconds * 1000
request_log = {
'date': start_time.isoformat(),
'user_agent': environ.get('HTTP_USER_AGENT'),
'method': environ.get('REQUEST_METHOD'),
'path': environ.get('PATH_INFO'),
'query_string': environ.get('QUERY_STRING'),
'remote_addr': environ.get('X_REAL_IP', environ.get('REMOTE_ADDR')),
'x_forwarded_for': environ.get('X_FORWARDED_FOR'),
'status': status_code,
'content_length': content_length,
'request_time': elapsed_time_milliseconds
}
self.context.end_request(self.logging_topic, request_log)
return response
| from datetime import datetime
class LoggingMiddleware(object):
def __init__(self, app, wsgi_app, context, logging_topic):
self.app = app
self.wsgi_app = wsgi_app
self.context = context
self.logging_topic = logging_topic
def __call__(self, environ, start_response):
with self.app.app_context():
start_time = datetime.utcnow()
status_code = None
content_length = None
self.context.start_request()
def logging_start_response(status, response_headers, exc_info=None):
nonlocal status_code, content_length
status_code = int(status.partition(' ')[0])
for name, value in response_headers:
if name.lower() == 'content-length':
content_length = int(value)
break
return start_response(status, response_headers, exc_info)
response = self.wsgi_app(environ, logging_start_response)
if content_length is None:
content_length = len(b''.join(response))
elapsed_time = datetime.utcnow() - start_time
elapsed_time_milliseconds = elapsed_time.microseconds / 1000.0 + elapsed_time.seconds * 1000
request_log = {
'date': start_time.isoformat(),
'user_agent': environ.get('HTTP_USER_AGENT'),
'method': environ.get('REQUEST_METHOD'),
'path': environ.get('PATH_INFO'),
'query_string': environ.get('QUERY_STRING'),
'remote_addr': environ.get('X_REAL_IP', environ.get('REMOTE_ADDR')),
'x_forwarded_for': environ.get('X_Forwarded_For'),
'status': status_code,
'content_length': content_length,
'request_time': elapsed_time_milliseconds
}
self.context.end_request(self.logging_topic, request_log)
return response
| Python | 0.999999 |
911e961f189967554bc5a046f022bb1c394cc119 | Debug and test before finishing. p50-52 | bruteKey.py | bruteKey.py | #!/usr/bin/env python
import pexpect, optparse, os
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0
usage = "Example: bruteKey.py -H <target> -u <user name> -d <directory> "
def banner():
print "##### SSH Weak Key Exploit #######"
usage
print""
def connect(user, host, keyfile, release):
global Stop
global Fails
try:
perm_denied = 'Permission denied'
        ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
opt = ' -o PasswordAuthentication=no'
connStr = 'ssh '+user+'@'+host+' -i'+keyfile+opt
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied, ssh_newkey, conn_closed, '$', '#', ])
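        # ret: 0 timeout, 1 permission denied, 2 unknown host key, 3 connection closed, 4/5 shell prompt.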
if ret == 2:
print '[-] Adding Host to ~/.ssh/known_hosts'
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print '[-] Connection Closed By Remote Host'
Fails += 1
elif ret > 3:
print '[+] Success. '+str(keyfile)
Stop = True
finally:
if release:
connection_lock.release()
def main():
    parser = optparse.OptionParser('usage: %prog -H <target host> -u <user> -d <directory>')
parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
parser.add_option('-d', dest='passDir', type='string', help='specify directory with keys')
    parser.add_option('-u', dest='user', type='string', help='specify the user')
(options, args) = parser.parse_args()
host = options.tgtHost
passDir = options.passDir
    user = options.user
if host == None or passDir == None or user == None:
print parser.usage
exit(0)
for filename in os.listdir(passDir):
if Stop:
print '[*] Exiting: Key Found.'
exit(0)
if Fails > 5:
print '[!] Exiting: '+'Too Many Connections Closed by Remote Host.'
print '[!] Adjust number of simultaneous threads.'
exit(0)
connection_lock.acquire()
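        # connect() releases this semaphore slot in its finally block.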
fullpath = os.path.join(passDir, filename)
print '[-] Testing Keyfile '+str(fullpath)
t = Thread(target=connect, args =(user, host, fullpath, True))
child = t.start()
if __name__ == '__main__':
main() | #!/usr/bin/env python
import pexpect, optparse, os
from threading import *
maxConnections = 5
connection_lock = BoundedSemaphore(value=maxConnections)
Stop = False
Fails = 0
usage = "Example: bruteKey.py -H <target> -u <user name> -d <directory> "
def banner():
print "##### SSH Weak Key Exploit #######"
usage
print""
def connect(user, host, keyfile, release):
global Stop
global Fails
try:
perm_denied = 'Permission denied'
        ssh_newkey = 'Are you sure you want to continue'
conn_closed = 'Connection closed by remote host'
opt = ' -o PasswordAuthentication=no'
connStr = 'ssh '+user+'@'+host+' -i'+keyfile+opt
child = pexpect.spawn(connStr)
ret = child.expect([pexpect.TIMEOUT, perm_denied, ssh_newkey, conn_closed, '$', '#', ])
if ret == 2:
print '[-] Adding Host to ~/.ssh/known_hosts'
child.sendline('yes')
connect(user, host, keyfile, False)
elif ret == 3:
print '[-] Connection Closed By Remote Host'
Fails += 1
elif ret > 3:
print '[+] Success. '+str(keyfile)
Stop = True
finally:
if release:
connection_lock.release()
def main():
    parser = optparse.OptionParser('usage: %prog -H <target host> -u <user> -d <directory>')
parser.
if __name__ == '__main__':
main() | Python | 0 |
899fcb64dbc8cc6bb9bbaf0a41f8237a9b4eea5a | remove unnecessary loop in _resolve_alias | core/argument_parser.py | core/argument_parser.py | from core.get_names import get_idol_names
IDOL_NAMES = get_idol_names()
ALIASES = {
'name': {
'honk': 'Kousaka Honoka',
'eri': 'Ayase Eli',
'yohane': 'Tsushima Yoshiko',
'hana': 'Koizumi Hanayo',
'pana': 'Koizumi Hanayo',
'tomato': "Nishikino Maki",
'zura': 'Kunikida Hanamaru',
'technology': 'Kunikida Hanamaru',
'woah': 'Kunikida Hanamaru',
'mike': 'Kurosawa Ruby', # Memes
'nell': "Yazawa Nico"
},
'main_unit': {
'muse': "μ's",
"μ's": "μ's",
"µ's": "μ's",
'aqours': 'Aqours',
'aquas': 'Aqours',
'aquors': 'Aqours',
'a-rise': 'A-RISE',
'arise': 'A-RISE',
'saint': 'Saint Snow',
'snow': 'Saint Snow'
},
'sub_unit': {
'lily': 'Lily White',
'white': 'Lily White',
'bibi': 'Bibi',
'printemps': 'Printemps',
'guilty': 'Guilty Kiss',
'kiss': 'Guilty Kiss',
'azalea': 'Azalea',
'cyaron': 'CYaRon!',
'cyaron!': 'CYaRon!',
'crayon': 'CYaRon!',
'crayon!': 'CYaRon!'
}
}
def parse_arguments(args: tuple, allow_unsupported_lists: bool = False) -> dict:
"""
Parse all user arguments
:param args: Tuple of all arguments
    :param allow_unsupported_lists: If False, parameters for which School Idol
        Tomodachi does not allow multiple values are reduced to a single value.
:return: A list of tuples of (arg_type, arg_value)
"""
parsed_args = {
'name': [],
'main_unit': [],
'sub_unit': [],
'year': [],
'attribute': [],
'rarity': []
}
for arg in args:
arg_type, arg_value = _parse_argument(arg)
if arg_type:
parsed_args[arg_type].append(arg_value)
    # Convert all values to sets and back to lists to remove duplicates.
for arg_type in parsed_args:
parsed_args[arg_type] = list(set(parsed_args[arg_type]))
    # Remove multiple values from fields that do not support them
if not allow_unsupported_lists:
for key in ('sub_unit', 'attribute', 'year'):
parsed_args[key] = parsed_args[key][:1]
return parsed_args
def _parse_argument(arg: str) -> tuple:
"""
Parse user argument.
:param arg: An argument.
:return: Tuple of (arg_type, arg_value)
"""
arg = arg.lower()
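    # Try idol surnames first, then the alias tables, then year/attribute/rarity keywords.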
# Check for names
for full_name in IDOL_NAMES:
name_split = full_name.split(' ')
# Check if name is exact match
if arg.title() == name_split[-1]:
return 'name', full_name
# Check for unit and idol names by alias
for key, val in ALIASES.items():
search_result = val.get(arg, None)
if search_result:
return key, search_result
# Check for years
if arg in ('first', 'second', 'third'):
return 'year', arg.title()
# Check for attribute
if arg in ('cool', 'smile', 'pure'):
return 'attribute', arg.title()
# Check for rarity
if arg.upper() in ('N', 'R', 'SR', 'SSR', 'UR'):
return 'rarity', arg.upper()
return None, None
| from core.get_names import get_idol_names
IDOL_NAMES = get_idol_names()
ALIASES = {
"name": {
("honk",): "Kousaka Honoka",
("eri",): "Ayase Eli",
("yohane",): "Tsushima Yoshiko",
("hana", "pana"): "Koizumi Hanayo",
("tomato",): "Nishikino Maki",
("zura", "woah", "technology"): "Kunikida Hanamaru",
("mike",): "Kurosawa Ruby", # Memes
("nell",): "Yazawa Nico"
},
"main_unit": {
("muse", "μ's", "µ's"): "μ's",
("aqours", "aquas", "aquors"): "Aqours",
("a-rise", "arise"): "A-RISE",
("saint", "snow"): "Saint Snow"
},
"sub_unit": {
("lily", "white"): "Lily White",
("bibi",): "Bibi",
("printemps",): "Printemps",
("guilty", "kiss"): "Guilty Kiss",
("azalea",): "Azalea",
("cyaron", "cyaron!", "crayon", "crayon!"): "CYaRon!"
}
}
def parse_arguments(args: tuple, allow_unsupported_lists: bool=False) -> dict:
"""
Parse all user arguments
:param args: Tuple of all arguments
    :param allow_unsupported_lists: If False, parameters for which School Idol
        Tomodachi does not allow multiple values are reduced to a single value.
:return: A list of tuples of (arg_type, arg_value)
"""
parsed_args = {
"name": [],
"main_unit": [],
"sub_unit": [],
"year": [],
"attribute": [],
"rarity": []
}
for arg in args:
arg_type, arg_value = _parse_argument(arg)
if arg_type:
parsed_args[arg_type].append(arg_value)
    # Convert all values to sets and back to lists to remove duplicates.
for arg_type in parsed_args:
parsed_args[arg_type] = list(set(parsed_args[arg_type]))
    # Remove multiple values from fields that do not support them
if not allow_unsupported_lists:
if len(parsed_args["sub_unit"]) > 1:
parsed_args["sub_unit"] = [parsed_args["sub_unit"][0]]
if len(parsed_args["attribute"]) > 1:
parsed_args["attribute"] = [parsed_args["attribute"][0]]
if len(parsed_args["year"]) > 1:
parsed_args["year"] = [parsed_args["year"][0]]
return parsed_args
def _parse_argument(arg: str) -> tuple:
"""
Parse user argument.
:param arg: An argument.
:return: Tuple of (arg_type, arg_value)
"""
arg = arg.lower()
# Check for names
for full_name in IDOL_NAMES:
name_split = full_name.split(" ")
# Check if name is exact match
if arg.title() == name_split[-1]:
return ("name", full_name)
# Check for unit and idol names by alias
for alias_dict in ALIASES:
search_result = _resolve_alias(arg, ALIASES[alias_dict])
if search_result:
return (alias_dict, search_result)
# Check for years
if arg in ["first", "second", "third"]:
return ("year", arg.title())
# Check for attribute
if arg in ["cool", "smile", "pure"]:
return ("attribute", arg.title())
# Check for rarity
if arg.upper() in ["N", "R", "SR", "SSR", "UR"]:
return ("rarity", arg.upper())
return (None, None)
def _resolve_alias(target: str, alias_dict: dict) -> str:
"""
    Resolve an alias from a given alias dictionary.
    :param target: Target string being searched for.
    :param alias_dict: Alias dictionary being searched in.
:return: Alias result if found, otherwise returns an empty string.
"""
for key in alias_dict:
if target in key:
return alias_dict[key]
return ""
| Python | 0.000027 |
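For illustration, a quick usage sketch of the parse_arguments helper from the row above. The input strings are hypothetical user arguments, and resolving 'honk' assumes IDOL_NAMES contains the usual cast:

# Sketch: expected behaviour of parse_arguments (inputs hypothetical).
args = ('honk', 'smile', 'first', 'UR', 'aqours')
parsed = parse_arguments(args)
# parsed['name']      -> ['Kousaka Honoka']  (via the 'honk' alias)
# parsed['attribute'] -> ['Smile']
# parsed['year']      -> ['First']
# parsed['rarity']    -> ['UR']
# parsed['main_unit'] -> ['Aqours']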
f3578096219dbb82572063c8a6dbb75be4da07ac | Update P03_combinePDFs fixed reading encrypted files | books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePDFs.py | books/AutomateTheBoringStuffWithPython/Chapter13/P03_combinePDFs.py | #! python3
# combinePdfs.py - Combines all the PDFs in the current working directory into
# a single PDF.
import PyPDF4, os
# Get all the PDF filenames.
pdfFiles = []
for filename in os.listdir('.'):
if filename.endswith(".pdf"):
pdfFiles.append(filename)
pdfFiles.sort(key=str.lower)
pdfWriter = PyPDF4.PdfFileWriter()
# Loop through all the PDF files.
for filename in pdfFiles:
pdfFileObj = open(filename, "rb")
pdfReader = PyPDF4.PdfFileReader(pdfFileObj)
if pdfReader.isEncrypted and filename == "encrypted.pdf":
pdfReader.decrypt("rosebud")
if pdfReader.isEncrypted and filename == "encryptedminutes.pdf":
pdfReader.decrypt("swordfish")
# Loop through all the pages (except the first) and add them.
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
# Save the resulting PDF to a file.
pdfOutput = open("allminutes.pdf", "wb")
pdfWriter.write(pdfOutput)
pdfOutput.close()
| #! python3
# combinePdfs.py - Combines all the PDFs in the current working directory into
# a single PDF.
import PyPDF4, os
# Get all the PDF filenames.
pdfFiles = []
for filename in os.listdir('.'):
if filename.endswith(".pdf"):
pdfFiles.append(filename)
pdfFiles.sort(key=str.lower)
pdfWriter = PyPDF4.PdfFileWriter()
# Loop through all the PDF files.
for filename in pdfFiles:
pdfFileObj = open(filename, "rb")
pdfReader = PyPDF4.PdfFileReader(pdfFileObj)
# Loop through all the pages (except the first) and add them.
for pageNum in range(1, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage(pageObj)
# Save the resulting PDF to a file.
pdfOutput = open("allminutes.pdf", "wb")
pdfWriter.write(pdfOutput)
pdfOutput.close()
| Python | 0 |
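A lookup table generalises the per-file decrypt calls added above; this sketch assumes the same PyPDF4 API, with the filename/password pairs taken from the script:

# Sketch: decrypt known encrypted PDFs via a filename -> password map.
import PyPDF4

PASSWORDS = {"encrypted.pdf": "rosebud", "encryptedminutes.pdf": "swordfish"}

def open_reader(filename):
    reader = PyPDF4.PdfFileReader(open(filename, "rb"))
    if reader.isEncrypted:
        # Raises KeyError when no password is on file for this document.
        reader.decrypt(PASSWORDS[filename])
    return reader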
26ce46c14f3fc5d38253617822974c21b488dd95 | Set priority to 0, set viability to permanently null. Add test to ensure keyring can render itself. Ref #358. | keyring/backends/chainer.py | keyring/backends/chainer.py | """
Implementation of a keyring backend chainer.
This is specifically not a viable backend, and must be
instantiated directly with a list of ordered backends.
"""
from __future__ import absolute_import
from ..backend import KeyringBackend
class ChainerBackend(KeyringBackend):
"""
>>> ChainerBackend(())
<keyring.backends.chainer.ChainerBackend object at ...>
"""
priority = 0
viable = False
def __init__(self, backends):
self.backends = list(backends)
def get_password(self, service, username):
for backend in self.backends:
password = backend.get_password(service, username)
if password is not None:
return password
def set_password(self, service, username, password):
for backend in self.backends:
try:
return backend.set_password(service, username, password)
except NotImplementedError:
pass
def delete_password(self, service, username):
for backend in self.backends:
try:
return backend.delete_password(service, username)
except NotImplementedError:
pass
def get_credential(self, service, username):
for backend in self.backends:
credential = backend.get_credential(service, username)
if credential is not None:
return credential
| """
Implementation of a keyring backend chainer.
This is specifically not a viable backend, and must be
instantiated directly with a list of ordered backends.
"""
from __future__ import absolute_import
from ..backend import KeyringBackend
class ChainerBackend(KeyringBackend):
def __init__(self, backends):
self.backends = list(backends)
def get_password(self, service, username):
for backend in self.backends:
password = backend.get_password(service, username)
if password is not None:
return password
def set_password(self, service, username, password):
for backend in self.backends:
try:
return backend.set_password(service, username, password)
except NotImplementedError:
pass
def delete_password(self, service, username):
for backend in self.backends:
try:
return backend.delete_password(service, username)
except NotImplementedError:
pass
def get_credential(self, service, username):
for backend in self.backends:
credential = backend.get_credential(service, username)
if credential is not None:
return credential
| Python | 0 |
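For illustration, the chainer accepts any objects exposing the backend methods it calls; these toy in-memory backends are hypothetical stand-ins, not part of keyring:

# Sketch: the first backend returning a non-None answer wins.
class DictBackend(object):
    def __init__(self, store):
        self.store = store
    def get_password(self, service, username):
        return self.store.get((service, username))
    def set_password(self, service, username, password):
        self.store[(service, username)] = password
    def delete_password(self, service, username):
        self.store.pop((service, username), None)
    def get_credential(self, service, username):
        return None

chain = ChainerBackend([DictBackend({('svc', 'bob'): 's3cret'}),
                        DictBackend({})])
assert chain.get_password('svc', 'bob') == 's3cret'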
d6ef946df0497868de9d035ab0d56d0d828c2be1 | Disable failing assertion in python test | tests/query_test/test_hdfs_fd_caching.py | tests/query_test/test_hdfs_fd_caching.py | # Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
from copy import copy
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import *
from tests.common.test_vector import *
from tests.common.impala_cluster import ImpalaCluster
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.skip import SkipIfS3
from tests.util.shell_util import exec_process
class TestHdfsFdCaching(ImpalaTestSuite):
"""
This test suite tests the behavior of HDFS file descriptor caching by evaluating the
metrics exposed by the Impala daemon.
"""
NUM_ROWS = 10000
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def add_test_dimensions(cls):
super(TestHdfsFdCaching, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
@classmethod
def get_workload(cls):
return 'functional-query'
def setup_method(self, method):
self.cleanup_db("cachefd")
self.client.execute("create database cachefd")
self.client.execute("create table cachefd.simple(id int, col1 int, col2 int) "
"stored as parquet")
buf = "insert into cachefd.simple values"
self.client.execute(buf + ", ".join(["({0},{0},{0})".format(x) for x in range(self.NUM_ROWS)]))
    def teardown_method(self, method):
        self.cleanup_db("cachefd")
@pytest.mark.execute_serially
def test_simple_scan(self, vector):
# One table, one file, one handle
num_handles_before = self.cached_handles()
self.execute_query("select * from cachefd.simple limit 1", vector=vector)
num_handles_after = self.cached_handles()
# Should have at least one more handle cached and not more than three more
assert num_handles_after >= (num_handles_before + 1)
assert num_handles_after <= (num_handles_before + 3)
# No open handles if scanning is finished
assert self.outstanding_handles() == 0
# No change when reading the table again
for x in range(10):
self.execute_query("select * from cachefd.simple limit 1", vector=vector)
# TODO This assertion fails reliably in the Kudu feature branch build for reasons yet
# unknown, since it seems unrelated to other changes. Once the root cause for the
# failure is known this assertion should be uncommented.
# assert num_handles_after == self.cached_handles()
assert self.outstanding_handles() == 0
def cached_handles(self):
return self.get_agg_metric("impala-server.io.mgr.num-cached-file-handles")
def outstanding_handles(self):
return self.get_agg_metric("impala-server.io.mgr.num-file-handles-outstanding")
def get_agg_metric(self, key, fun=sum):
cluster = ImpalaCluster()
return fun([s.service.get_metric_value(key) for s
in cluster.impalads])
| # Copyright 2012 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pytest
from copy import copy
from subprocess import call
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.impala_test_suite import *
from tests.common.test_vector import *
from tests.common.impala_cluster import ImpalaCluster
from tests.common.test_dimensions import create_exec_option_dimension
from tests.common.skip import SkipIfS3
from tests.util.shell_util import exec_process
class TestHdfsFdCaching(ImpalaTestSuite):
"""
This test suite tests the behavior of HDFS file descriptor caching by evaluating the
metrics exposed by the Impala daemon.
"""
NUM_ROWS = 10000
@classmethod
def file_format_constraint(cls, v):
return v.get_value('table_format').file_format in ["parquet"]
@classmethod
def add_test_dimensions(cls):
super(TestHdfsFdCaching, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(cls.file_format_constraint)
@classmethod
def get_workload(cls):
return 'functional-query'
def setup_method(self, method):
self.cleanup_db("cachefd")
self.client.execute("create database cachefd")
self.client.execute("create table cachefd.simple(id int, col1 int, col2 int) "
"stored as parquet")
buf = "insert into cachefd.simple values"
self.client.execute(buf + ", ".join(["({0},{0},{0})".format(x) for x in range(self.NUM_ROWS)]))
    def teardown_method(self, method):
        self.cleanup_db("cachefd")
@pytest.mark.execute_serially
def test_simple_scan(self, vector):
# One table, one file, one handle
num_handles_before = self.cached_handles()
self.execute_query("select * from cachefd.simple limit 1", vector=vector)
num_handles_after = self.cached_handles()
# Should have at least one more handle cached and not more than three more
assert num_handles_after >= (num_handles_before + 1)
assert num_handles_after <= (num_handles_before + 3)
# No open handles if scanning is finished
assert self.outstanding_handles() == 0
# No change when reading the table again
for x in range(10):
self.execute_query("select * from cachefd.simple limit 1", vector=vector)
assert num_handles_after == self.cached_handles()
assert self.outstanding_handles() == 0
def cached_handles(self):
return self.get_agg_metric("impala-server.io.mgr.num-cached-file-handles")
def outstanding_handles(self):
return self.get_agg_metric("impala-server.io.mgr.num-file-handles-outstanding")
def get_agg_metric(self, key, fun=sum):
cluster = ImpalaCluster()
return fun([s.service.get_metric_value(key) for s
in cluster.impalads])
| Python | 0.000001 |
56598776ce6588445cf0d76b5faaea507d5d1405 | Update Labels for consistency | github3/issues/label.py | github3/issues/label.py | # -*- coding: utf-8 -*-
"""Module containing the logic for labels."""
from __future__ import unicode_literals
from json import dumps
from ..decorators import requires_auth
from ..models import GitHubCore
class Label(GitHubCore):
"""A representation of a label object defined on a repository.
See also: http://developer.github.com/v3/issues/labels/
This object has the following attributes::
.. attribute:: color
        The hexadecimal representation of the background color of this label.
.. attribute:: name
The name (display label) for this label.
"""
def _update_attributes(self, label):
self._api = label['url']
self.color = label['color']
self.name = label['name']
self._uniq = self._api
def _repr(self):
return '<Label [{0}]>'.format(self)
def __str__(self):
return self.name
@requires_auth
def delete(self):
"""Delete this label.
:returns:
True if successfully deleted, False otherwise
:rtype:
bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def update(self, name, color):
"""Update this label.
:param str name:
(required), new name of the label
:param str color:
(required), color code, e.g., 626262, no leading '#'
:returns:
True if successfully updated, False otherwise
:rtype:
bool
"""
json = None
if name and color:
if color[0] == '#':
color = color[1:]
json = self._json(self._patch(self._api, data=dumps({
'name': name, 'color': color})), 200)
if json:
self._update_attributes(json)
return True
return False
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from json import dumps
from ..decorators import requires_auth
from ..models import GitHubCore
class Label(GitHubCore):
"""The :class:`Label <Label>` object. Succintly represents a label that
exists in a repository.
See also: http://developer.github.com/v3/issues/labels/
"""
def _update_attributes(self, label):
self._api = self._get_attribute(label, 'url')
#: Color of the label, e.g., 626262
self.color = self._get_attribute(label, 'color')
#: Name of the label, e.g., 'bug'
self.name = self._get_attribute(label, 'name')
self._uniq = self._api
def _repr(self):
return '<Label [{0}]>'.format(self)
def __str__(self):
return self.name
@requires_auth
def delete(self):
"""Delete this label.
:returns: bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def update(self, name, color):
"""Update this label.
:param str name: (required), new name of the label
:param str color: (required), color code, e.g., 626262, no leading '#'
:returns: bool
"""
json = None
if name and color:
if color[0] == '#':
color = color[1:]
json = self._json(self._patch(self._api, data=dumps({
'name': name, 'color': color})), 200)
if json:
self._update_attributes(json)
return True
return False
| Python | 0 |
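Typical use of the documented update call, with a hypothetical token, repository, and color:

# Sketch: renaming a label and changing its color via github3.py.
import github3

gh = github3.login(token='<token>')            # hypothetical credentials
repo = gh.repository('someowner', 'somerepo')  # hypothetical repository
label = repo.label('bug')
if label.update('bug-confirmed', '#d73a4a'):   # leading '#' is stripped
    print('updated: {0} {1}'.format(label.name, label.color))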
9626736dc94c85987472b7d7ad5951363883a5dc | Disable Facter plugin if yaml import fails | JsonStats/FetchStats/Plugins/Facter.py | JsonStats/FetchStats/Plugins/Facter.py | import datetime
from JsonStats.FetchStats import Fetcher
import os.path
class Facter(Fetcher):
"""
Facter plugin for `jsonstats`. Returns key-value pairs of general
system information provided by the `facter` command.
Load conditions:
* Plugin will load if the `facter` command is found
Operating behavior:
* Plugin will call `facter` with the `-p` (return `puppet` facts)
option if the `puppet` command is on the system.
Dependencies:
* Facter - http://puppetlabs.com/blog/facter-part-1-facter-101
* PyYAML - http://pyyaml.org/wiki/PyYAML
Optional dependencies:
* Puppet - http://puppetlabs.com/puppet/what-is-puppet
"""
try:
import yaml
except ImportError:
yaml = None
def __init__(self):
self.context = 'facter'
self._cmd = 'facter --yaml 2>/dev/null'
if self.yaml is None:
self._loaded(False, msg='No module named yaml')
return
if os.path.exists('/usr/bin/puppet'):
self._cmd = 'facter -p --yaml 2>/dev/null'
self._load_data()
def _load_data(self):
self._refresh_time = datetime.datetime.utcnow()
try:
output = self._exec(self._cmd)
self.facts = self.yaml.load(output)
self._loaded(True)
except OSError, e:
# Couldn't find facter command, most likely
self._loaded(False, msg=str(e))
except Exception, e:
# Something else did indeed go wrong
self._loaded(False, msg=str(e))
def dump(self):
        # Poor man's cache: refresh the data every five minutes.
if (datetime.datetime.utcnow() -
datetime.timedelta(minutes=5)) > self._refresh_time:
self._load_data()
return self.facts
def dump_json(self):
return self.json.dumps(self.dump())
| import datetime
from JsonStats.FetchStats import Fetcher
import os.path
class Facter(Fetcher):
"""
Facter plugin for `jsonstats`. Returns key-value pairs of general
system information provided by the `facter` command.
Load conditions:
* Plugin will load if the `facter` command is found
Operating behavior:
* Plugin will call `facter` with the `-p` (return `puppet` facts)
option if the `puppet` command is on the system.
Dependencies:
* Facter - http://puppetlabs.com/blog/facter-part-1-facter-101
Optional dependencies:
* Puppet - http://puppetlabs.com/puppet/what-is-puppet
"""
import yaml
def __init__(self):
self.context = 'facter'
self._cmd = 'facter --yaml 2>/dev/null'
if os.path.exists('/usr/bin/puppet'):
self._cmd = 'facter -p --yaml 2>/dev/null'
self._load_data()
def _load_data(self):
self._refresh_time = datetime.datetime.utcnow()
try:
output = self._exec(self._cmd)
self.facts = self.yaml.load(output)
self._loaded(True)
except OSError, e:
# Couldn't find facter command, most likely
self._loaded(False, msg=str(e))
except Exception, e:
# Something else did indeed go wrong
self._loaded(False, msg=str(e))
def dump(self):
        # Poor man's cache: refresh the data every five minutes.
if (datetime.datetime.utcnow() -
datetime.timedelta(minutes=5)) > self._refresh_time:
self._load_data()
return self.facts
def dump_json(self):
return self.json.dumps(self.dump())
| Python | 0 |
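The guard added in the new version is the standard optional-dependency pattern; a standalone sketch of the same idea:

# Sketch: degrade gracefully when an optional dependency is missing.
try:
    import yaml
except ImportError:
    yaml = None

def load_facts(text):
    if yaml is None:
        raise RuntimeError('PyYAML is required to parse facter output')
    return yaml.safe_load(text)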
764f785bfa34d99dc2633db78a0d80407e401993 | Implement a client instance | zuora/client.py | zuora/client.py | """
Client for Zuora SOAP API
"""
# TODO:
# - Handle debug
# - Handle error
# - Session policy
import os

from suds.client import Client
from suds.sax.element import Element
from zuora.transport import HttpTransportWithKeepAlive
class ZuoraException(Exception):
"""
Base Zuora Exception.
"""
pass
class Zuora(object):
"""
SOAP Client based on Suds
"""
def __init__(self, wsdl, login, password):
self.wsdl = wsdl
self.login = login
self.password = password
self.session = None
self.wsdl_path = 'file://%s' % os.path.abspath(self.wsdl)
self.client = Client(
self.wsdl_path,
transport=HttpTransportWithKeepAlive())
def __str__(self):
return self.client.__str__()
| """
Client for Zuora SOAP API
"""
# TODO:
# - Handle debug
# - Handle error
# - Session policy
from suds.client import Client
from suds.sax.element import Element
class ZuoraException(Exception):
"""
Base Zuora Exception.
"""
pass
class Zuora(object):
"""
SOAP Client based on Suds
"""
def __init__(self, wsdl, login, password):
self.wsdl = wsdl
self.login = login
self.password = password
| Python | 0.000426 |
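Instantiating the client above needs only a local WSDL and credentials; the values in this sketch are hypothetical:

# Sketch: constructing the Zuora SOAP client (values hypothetical).
zuora = Zuora(wsdl='zuora.a.38.0.wsdl',
              login='api-user@example.com',
              password='secret')
print(zuora)  # __str__ delegates to the suds client's service summary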
88206513d4a04a99832ac8461a3209b2d1d7d2c8 | make test work | tests/test_quality/test_restoringbeam.py | tests/test_quality/test_restoringbeam.py | import os
import unittest2 as unittest
from tkp.quality.restoringbeam import beam_invalid
from tkp.testutil.decorators import requires_data
from tkp import accessors
from tkp.testutil.data import DATAPATH
fits_file = os.path.join(DATAPATH,
'quality/noise/bad/home-pcarrol-msss-3C196a-analysis-band6.corr.fits')
@requires_data(fits_file)
class TestRestoringBeam(unittest.TestCase):
def test_header(self):
image = accessors.open(fits_file)
(semimaj, semimin, theta) = image.beam
self.assertFalse(beam_invalid(semimaj, semimin, theta))
# TODO: this is for FOV calculation and checking
#data = tkp.quality.restoringbeam.parse_fits(image)
#frequency = image.freq_eff
#wavelength = scipy.constants.c/frequency
#d = 32.25
#fwhm = tkp.lofar.beam.fwhm(wavelength, d)
#fov = tkp.lofar.beam.fov(fwhm)
def test_infinite(self):
smaj, smin, theta = float('inf'), float('inf'), float('inf')
self.assertTrue(beam_invalid(smaj, smin, theta))
if __name__ == '__main__':
unittest.main()
| import os
import unittest2 as unittest
from tkp.quality.restoringbeam import beam_invalid
from tkp.testutil.decorators import requires_data
from tkp import accessors
from tkp.testutil.data import DATAPATH
fits_file = os.path.join(DATAPATH,
'quality/noise/bad/home-pcarrol-msss-3C196a-analysis-band6.corr.fits')
@requires_data(fits_file)
class TestRestoringBeam(unittest.TestCase):
def test_header(self):
image = accessors.open(fits_file)
(semimaj, semimin, theta) = image.beam
self.assertFalse(beam_invalid(semimaj, semimin))
# TODO: this is for FOV calculation and checking
#data = tkp.quality.restoringbeam.parse_fits(image)
#frequency = image.freq_eff
#wavelength = scipy.constants.c/frequency
#d = 32.25
#fwhm = tkp.lofar.beam.fwhm(wavelength, d)
#fov = tkp.lofar.beam.fov(fwhm)
def test_infinite(self):
smaj, smin, theta = float('inf'), float('inf'), float('inf')
self.assertTrue(beam_invalid(smaj, smin, theta))
if __name__ == '__main__':
unittest.main()
| Python | 0.000195 |
716ffae543838af6de7b83723ac6048a9f8f390a | improve test list latest articles | knowledge/tests/tests_views.py | knowledge/tests/tests_views.py | from __future__ import unicode_literals
from model_mommy import mommy
from knowledge.base import choices
from knowledge.base.test import ViewTestCase
from knowledge.models import Article
class HomepageTestCase(ViewTestCase):
from knowledge.views import Homepage
view_class = Homepage
view_name = 'knowledge:homepage'
def setUp(self):
self.category = mommy.make_recipe('knowledge.tests.category_with_articles')
mommy.make_recipe('knowledge.tests.category_without_articles')
for article in Article.objects.published():
article.votes.add(token=article.id, rate=choices.VoteChoice.Upvote)
def test_category_list(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['category_list'], [self.category])
def test_have_a_search_form_on_context(self):
from knowledge.forms import SimpleSearchForm
response = self.get()
self.assertEqual(response.context_data['search_form'], SimpleSearchForm)
def test_list_latest_articles(self):
articles = mommy.make_recipe('knowledge.tests.published_article',
category=self.category,
_quantity=5)
response = self.get()
self.assertHttpOK(response)
self.assertEqual(Article.objects.count(), 7)
self.assertSeqEqual(response.context_data['new_articles'], articles)
def test_list_top_viewed_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published())
def test_list_top_rated_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['top_viewed_articles'], Article.objects.published())
| from __future__ import unicode_literals
from model_mommy import mommy
from knowledge.base import choices
from knowledge.base.test import ViewTestCase
from knowledge.models import Article
class HomepageTestCase(ViewTestCase):
from knowledge.views import Homepage
view_class = Homepage
view_name = 'knowledge:homepage'
def setUp(self):
self.category = mommy.make_recipe('knowledge.tests.category_with_articles')
mommy.make_recipe('knowledge.tests.category_without_articles')
for article in Article.objects.published():
article.votes.add(token=article.id, rate=choices.VoteChoice.Upvote)
def test_category_list(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['category_list'], [self.category])
def test_have_a_search_form_on_context(self):
from knowledge.forms import SimpleSearchForm
response = self.get()
self.assertEqual(response.context_data['search_form'], SimpleSearchForm)
def test_count_published_articles(self):
response = self.get()
category_list = response.context_data['category_list']
self.assertHttpOK(response)
self.assertEqual(category_list[0].get_articles_count(), 1)
def test_list_latest_published_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published())
def test_list_top_viewed_published_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['new_articles'], Article.objects.published())
def test_list_top_rated_published_articles(self):
response = self.get()
self.assertHttpOK(response)
self.assertSeqEqual(response.context_data['top_viewed_articles'], Article.objects.published())
| Python | 0.000001 |
faa912ed8d2cb68b2f8661ed3550745967f58ba1 | fix broken config path | test/test_transports.py | test/test_transports.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import ssl
from datetime import date
import pytest
from wampy.peers.clients import Client
from wampy.peers.routers import Crossbar
from wampy.roles.callee import callee
from wampy.testing.helpers import wait_for_session, wait_for_registrations
class DateService(Client):
@callee
def get_todays_date(self):
return datetime.date.today().isoformat()
class TestIP4(object):
@pytest.fixture(scope="function")
def config_path(self):
return './wampy/testing/configs/crossbar.json'
def test_ipv4_websocket_connection(self, config_path, router):
with router:
service = DateService(router=router)
with service:
wait_for_registrations(service, 1)
client = Client(router=router)
with client:
result = client.rpc.get_todays_date()
today = date.today()
assert result == today.isoformat()
class TestIP6(object):
@pytest.fixture(scope="function")
def config_path(self):
return './wampy/testing/configs/crossbar.ipv6.json'
def test_ipv6_websocket_connection(self, config_path, router):
with router:
service = DateService(router=router)
with service:
wait_for_registrations(service, 1)
client = Client(router=router)
with client:
result = client.rpc.get_todays_date()
today = date.today()
assert result == today.isoformat()
def test_ipv4_secure_websocket_connection():
try:
ssl.PROTOCOL_TLSv1_2
except AttributeError:
pytest.skip('Python Environment does not support TLS')
# note that TLS not supported by crossbar on ipv6
crossbar = Crossbar(
config_path='./wampy/testing/configs/crossbar.tls.json',
crossbar_directory='./',
)
with crossbar as router:
with DateService(router=router, use_tls=True) as service:
wait_for_registrations(service, 1)
client = Client(router=router, use_tls=True)
with client:
wait_for_session(client)
result = client.rpc.get_todays_date()
today = date.today()
assert result == today.isoformat()
| # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import ssl
from datetime import date
import pytest
from wampy.peers.clients import Client
from wampy.peers.routers import Crossbar
from wampy.roles.callee import callee
from wampy.testing.helpers import wait_for_session, wait_for_registrations
class DateService(Client):
@callee
def get_todays_date(self):
return datetime.date.today().isoformat()
class TestIP4(object):
@pytest.fixture(scope="function")
def config_path(self):
return './wampy/testing/configs/crossbar.config.ipv4.json'
def test_ipv4_websocket_connection(self, config_path, router):
with router:
service = DateService(router=router)
with service:
wait_for_registrations(service, 1)
client = Client(router=router)
with client:
result = client.rpc.get_todays_date()
today = date.today()
assert result == today.isoformat()
class TestIP6(object):
@pytest.fixture(scope="function")
def config_path(self):
return './wampy/testing/configs/crossbar.ipv6.json'
def test_ipv6_websocket_connection(self, config_path, router):
with router:
service = DateService(router=router)
with service:
wait_for_registrations(service, 1)
client = Client(router=router)
with client:
result = client.rpc.get_todays_date()
today = date.today()
assert result == today.isoformat()
def test_ipv4_secure_websocket_connection():
try:
ssl.PROTOCOL_TLSv1_2
except AttributeError:
pytest.skip('Python Environment does not support TLS')
# note that TLS not supported by crossbar on ipv6
crossbar = Crossbar(
config_path='./wampy/testing/configs/crossbar.tls.json',
crossbar_directory='./',
)
with crossbar as router:
with DateService(router=router, use_tls=True) as service:
wait_for_registrations(service, 1)
client = Client(router=router, use_tls=True)
with client:
wait_for_session(client)
result = client.rpc.get_todays_date()
today = date.today()
assert result == today.isoformat()
| Python | 0.000004 |
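Both test classes above pick their Crossbar config by overriding the same fixture name, which the router fixture presumably consumes. A minimal sketch of that pytest pattern:

# Sketch: per-class parametrisation via fixture shadowing (pytest).
import pytest

@pytest.fixture
def config_path():
    return 'configs/default.json'

class TestAlternate(object):
    @pytest.fixture
    def config_path(self):  # shadows the module-level fixture
        return 'configs/alternate.json'

    def test_uses_override(self, config_path):
        assert config_path.endswith('alternate.json')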
1112495ae59542ad76d1cc72f40ab91e7e562f1c | Simplify mac_epoch_diff | Lib/fontTools/ttLib/tables/_h_e_a_d.py | Lib/fontTools/ttLib/tables/_h_e_a_d.py | from __future__ import print_function, division
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from . import DefaultTable
import time
import calendar
headFormat = """
> # big endian
tableVersion: 16.16F
fontRevision: 16.16F
checkSumAdjustment: I
magicNumber: I
flags: H
unitsPerEm: H
created: Q
modified: Q
xMin: h
yMin: h
xMax: h
yMax: h
macStyle: H
lowestRecPPEM: H
fontDirectionHint: h
indexToLocFormat: h
glyphDataFormat: h
"""
class table__h_e_a_d(DefaultTable.DefaultTable):
dependencies = ['maxp', 'loca']
def decompile(self, data, ttFont):
dummy, rest = sstruct.unpack2(headFormat, data, self)
if rest:
# this is quite illegal, but there seem to be fonts out there that do this
assert rest == "\0\0"
def compile(self, ttFont):
self.modified = int(time.time() - mac_epoch_diff)
data = sstruct.pack(headFormat, self)
return data
def toXML(self, writer, ttFont):
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
formatstring, names, fixes = sstruct.getformat(headFormat)
for name in names:
value = getattr(self, name)
if name in ("created", "modified"):
try:
value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff)))
except ValueError:
value = time.asctime(time.gmtime(0))
if name in ("magicNumber", "checkSumAdjustment"):
if value < 0:
value = value + 0x100000000
value = hex(value)
if value[-1:] == "L":
value = value[:-1]
elif name in ("macStyle", "flags"):
value = num2binary(value, 16)
writer.simpletag(name, value=value)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
value = attrs["value"]
if name in ("created", "modified"):
value = calendar.timegm(time.strptime(value)) - mac_epoch_diff
elif name in ("macStyle", "flags"):
value = binary2num(value)
else:
value = safeEval(value)
setattr(self, name, value)
# Difference between the original Mac epoch (1904) to the epoch on this machine.
mac_epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
_months = [' ', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',
'sep', 'oct', 'nov', 'dec']
_weekdays = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
| from __future__ import print_function, division
from fontTools.misc.py23 import *
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval, num2binary, binary2num
from . import DefaultTable
import time
import calendar
headFormat = """
> # big endian
tableVersion: 16.16F
fontRevision: 16.16F
checkSumAdjustment: I
magicNumber: I
flags: H
unitsPerEm: H
created: Q
modified: Q
xMin: h
yMin: h
xMax: h
yMax: h
macStyle: H
lowestRecPPEM: H
fontDirectionHint: h
indexToLocFormat: h
glyphDataFormat: h
"""
class table__h_e_a_d(DefaultTable.DefaultTable):
dependencies = ['maxp', 'loca']
def decompile(self, data, ttFont):
dummy, rest = sstruct.unpack2(headFormat, data, self)
if rest:
# this is quite illegal, but there seem to be fonts out there that do this
assert rest == "\0\0"
def compile(self, ttFont):
self.modified = int(time.time() - mac_epoch_diff)
data = sstruct.pack(headFormat, self)
return data
def toXML(self, writer, ttFont):
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
formatstring, names, fixes = sstruct.getformat(headFormat)
for name in names:
value = getattr(self, name)
if name in ("created", "modified"):
try:
value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff)))
except ValueError:
value = time.asctime(time.gmtime(0))
if name in ("magicNumber", "checkSumAdjustment"):
if value < 0:
value = value + 0x100000000
value = hex(value)
if value[-1:] == "L":
value = value[:-1]
elif name in ("macStyle", "flags"):
value = num2binary(value, 16)
writer.simpletag(name, value=value)
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
value = attrs["value"]
if name in ("created", "modified"):
value = calendar.timegm(time.strptime(value)) - mac_epoch_diff
elif name in ("macStyle", "flags"):
value = binary2num(value)
else:
value = safeEval(value)
setattr(self, name, value)
def calc_mac_epoch_diff():
"""calculate the difference between the original Mac epoch (1904)
to the epoch on this machine.
"""
safe_epoch_t = (1972, 1, 1, 0, 0, 0, 0, 0, 0)
safe_epoch = time.mktime(safe_epoch_t) - time.timezone
# This assert fails in certain time zones, with certain daylight settings
#assert time.gmtime(safe_epoch)[:6] == safe_epoch_t[:6]
seconds1904to1972 = 60 * 60 * 24 * (365 * (1972-1904) + 17) # thanks, Laurence!
return int(safe_epoch - seconds1904to1972)
mac_epoch_diff = calc_mac_epoch_diff()
_months = [' ', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',
'sep', 'oct', 'nov', 'dec']
_weekdays = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
| Python | 0.999998 |
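The simplification replaces the 1972-anchored arithmetic of the old version with a single timegm call; both yield the same constant. A quick check (the literal is derived here, not taken from the source):

# Sketch: the Mac 1904 epoch as a (negative) Unix timestamp.
import calendar

mac_epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
# 66 years (1904-1970) = 66*365 + 17 leap days = 24107 days:
assert mac_epoch_diff == -24107 * 86400 == -2082844800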
c69ac39ee650445533d31a4a476f6f3b14cb43ca | Update roles.py | site/models/roles.py | site/models/roles.py | import datetime, re;
from sqlalchemy.orm import validates;
from server import DB, FlaskServer;
class Roles(DB.Model):
id = DB.Column(DB.Integer, primary_key=True, autoincrement=True);
    name = DB.Column(DB.String(20));
    district_id = DB.Column(DB.Integer, DB.ForeignKey('district.id'));
    created_by = DB.Column(DB.Integer, DB.ForeignKey('users.id'));
    created_at = DB.Column(DB.DateTime);
    updated_by = DB.Column(DB.Integer, DB.ForeignKey('users.id'));
updated_at = DB.Column(DB.DateTime, nullable=True);
def __init__(self, name, created_at, updated_at):
self.name = name;
self.created_at = datetime.datetime.now();
self.updated_at = self.created_at;
| import datetime, re;
from sqlalchemy.orm import validates;
from server import DB, FlaskServer;
class Roles(DB.Model):
id = DB.Column(DB.Integer, primary_key=True, autoincrement=True);
    name = DB.Column(DB.String(20));
    district_id = DB.Column(DB.Integer, DB.ForeignKey('district.id'));
    created_by = DB.Column(DB.Integer, DB.ForeignKey('users.id'));
    created_at = DB.Column(DB.DateTime);
    updated_by = DB.Column(DB.Integer, DB.ForeignKey('users.id'), nullable=True);
updated_at = DB.Column(DB.DateTime, nullable=True);
def __init__(self, name, created_at, updated_at):
self.name = name;
self.created_at = datetime.datetime.now();
self.updated_at = self.created_at;
| Python | 0.000001 |
a39dcc1548bea483225635292c5e1f489a3288a2 | support direct line navigation shorthand (submission from RUBY-12579 improved) #RUBY-12579 fixed | platform/platform-resources/src/launcher.py | platform/platform-resources/src/launcher.py | #!/usr/bin/python
import socket
import struct
import sys
import os
import os.path
import time
# see com.intelij.idea.SocketLock for the server side of this interface
RUN_PATH = '$RUN_PATH$'
CONFIG_PATH = '$CONFIG_PATH$'
args = []
skip_next = False
for arg in sys.argv[1:]:
if arg == '-h' or arg == '-?' or arg == '--help':
print 'Usage: ' + sys.argv[0] + ' [-h|-?|--help] [-l|--line <line_number>] [file_path[:<line_number>]]'
exit( 0 )
elif arg == '-l' or arg == '--line':
args.append(arg)
skip_next = True
elif skip_next:
args.append(arg)
skip_next = False
else:
if ':' in arg:
filepath, line_number = arg.rsplit( ':', 1 )
if line_number.isdigit():
args.append( '-l' )
args.append( line_number )
args.append( os.path.abspath( filepath ) )
else:
args.append(os.path.abspath(arg))
else:
args.append(os.path.abspath(arg))
def launch_with_port(port):
found = False
s = socket.socket()
s.settimeout(0.3)
try:
s.connect(('127.0.0.1', port))
except:
return False
while True:
try:
path_len = struct.unpack(">h", s.recv(2))[0]
path = s.recv(path_len)
path = os.path.abspath(path)
if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
found = True
break
except:
break
if found:
if args:
cmd = "activate " + "\0".join(args)
encoded = struct.pack(">h", len(cmd)) + cmd
s.send(encoded)
time.sleep(0.5) # don't close socket immediately
return True
return False
port = -1
try:
f = open(os.path.join(CONFIG_PATH, 'port'))
port = int(f.read())
except Exception:
type, value, traceback = sys.exc_info()
print(value)
port = -1
if port == -1:
# SocketLock actually allows up to 50 ports, but the checking takes too long
for port in range(6942, 6942+10):
if launch_with_port(port): exit()
else:
if launch_with_port(port): exit()
if sys.platform == "darwin":
# Mac OS: RUN_PATH is *.app path
if len(args):
args.insert(0, "--args")
os.execvp("open", ["-a", RUN_PATH] + args)
else:
# unix common
bin_dir, bin_file = os.path.split(RUN_PATH)
os.chdir(bin_dir)
os.execv(bin_file, [bin_file] + args)
| #!/usr/bin/python
import socket
import struct
import sys
import os
import os.path
import time
# see com.intelij.idea.SocketLock for the server side of this interface
RUN_PATH = '$RUN_PATH$'
CONFIG_PATH = '$CONFIG_PATH$'
args = []
skip_next = False
for arg in sys.argv[1:]:
if arg == '-l' or arg == '--line':
args.append(arg)
skip_next = True
elif skip_next:
args.append(arg)
skip_next = False
else:
args.append(os.path.abspath(arg))
def launch_with_port(port):
found = False
s = socket.socket()
s.settimeout(0.3)
try:
s.connect(('127.0.0.1', port))
except:
return False
while True:
try:
path_len = struct.unpack(">h", s.recv(2))[0]
path = s.recv(path_len)
path = os.path.abspath(path)
if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
found = True
break
except:
break
if found:
if args:
cmd = "activate " + "\0".join(args)
encoded = struct.pack(">h", len(cmd)) + cmd
s.send(encoded)
time.sleep(0.5) # don't close socket immediately
return True
return False
port = -1
try:
f = open(os.path.join(CONFIG_PATH, 'port'))
port = int(f.read())
except Exception:
type, value, traceback = sys.exc_info()
print(value)
port = -1
if port == -1:
# SocketLock actually allows up to 50 ports, but the checking takes too long
for port in range(6942, 6942+10):
if launch_with_port(port): exit()
else:
if launch_with_port(port): exit()
if sys.platform == "darwin":
# Mac OS: RUN_PATH is *.app path
if len(args):
args.insert(0, "--args")
os.execvp("open", ["-a", RUN_PATH] + args)
else:
# unix common
bin_dir, bin_file = os.path.split(RUN_PATH)
os.chdir(bin_dir)
os.execv(bin_file, [bin_file] + args)
| Python | 0 |
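The wire format used above is a big-endian 16-bit length prefix followed by the payload, with NUL-separated arguments inside an activate command. A decoding sketch mirroring the script's encoder (Python 2 strings, matching the script; on Python 3 these would be bytes):

# Sketch: encode/decode the launcher's length-prefixed frames.
import struct

def encode(cmd):
    return struct.pack(">h", len(cmd)) + cmd  # same framing as the script

def decode(buf):
    (length,) = struct.unpack(">h", buf[:2])
    return buf[2:2 + length]

frame = encode("activate " + "\0".join(["-l", "42", "/tmp/a.py"]))
assert decode(frame) == "activate -l\x0042\x00/tmp/a.py"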
8a5d931cb66dc452e9db6f52ac7fcc371a855608 | Update curate_teleco_performance_data.py | lab-01/cell-tower-anomaly-detection/00-scripts/curate_teleco_performance_data.py | lab-01/cell-tower-anomaly-detection/00-scripts/curate_teleco_performance_data.py | # ======================================================================================
# ABOUT
# In this PySpark script, we augment the Telecom data with curated customer data (prior
# job), curate it and persist to GCS
# ======================================================================================
import configparser
from datetime import datetime
import os
import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col, substring, lit, when, avg
from pyspark.sql import functions as F
from pyspark.sql.functions import input_file_name
import random
from pyspark.sql.types import *
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, date_format
from pyspark import SparkContext, SparkConf, SQLContext
from google.cloud import storage
import sys
# Parse arguments
sourceBucketNm=sys.argv[1]
# Source data definition
customerCuratedDataDir="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/output_data/customer_augmented/part*"
telcoCustomerChurnDataDir="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/01-datasets/telecom_customer_churn_data.csv"
# Output directory declaration
outputGCSURI="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/output_data"
# Get or create a Spark session
spark =SparkSession.builder.appName("cell_tower_performance_dataset-exploration").getOrCreate()
# Read the curated customer data (blended with services threshold data) from GCS
curatedCustomerDataDF = spark.read.format("parquet").option("header", True).option("inferschema",True).load(customerCuratedDataDir)
# Read the telecom customer churn data from GCS
telecomCustomerChurnRawDataDF = spark.read.format("csv").option("header", True).option("inferschema",True).load(telcoCustomerChurnDataDir)
telecomCustomerChurnRawDataDF.printSchema()
# Subset the telecom customer churn/performance data for relevant attributes
# ... Create subset
telecomCustomerChurnSubsetDF = telecomCustomerChurnRawDataDF.selectExpr("roam_Mean","change_mou","drop_vce_Mean","drop_dat_Mean","blck_vce_Mean","blck_dat_Mean","plcd_vce_Mean","plcd_dat_Mean","comp_vce_Mean","comp_dat_Mean","peak_vce_Mean","peak_dat_Mean","mou_peav_Mean","mou_pead_Mean","opk_vce_Mean","opk_dat_Mean","mou_opkv_Mean","mou_opkd_Mean","drop_blk_Mean","callfwdv_Mean","callwait_Mean","churn","months","uniqsubs","actvsubs","area","dualband","forgntvl","Customer_ID")
# ... Create a column called customer_ID_Short that is a substring of the original Customer_ID
telecomCustomerChurnFinalDF=telecomCustomerChurnSubsetDF.withColumn('customer_ID_Short', substring('Customer_ID', 4,7))
# ... Rename the Customer_ID column, customer_ID_original
telecomCustomerChurnFinalDF=telecomCustomerChurnFinalDF.withColumnRenamed('Customer_ID', 'customer_ID_original')
# ... Rename the newly added customer_ID_Short column, customer_ID
telecomCustomerChurnFinalDF=telecomCustomerChurnFinalDF.withColumnRenamed('customer_ID_Short', 'customer_ID')
# ... Quick visual
telecomCustomerChurnFinalDF.show(10,truncate=False)
# Join the curated customer data with the telecom network performance data based on customer ID
consolidatedDataDF = curatedCustomerDataDF.join(telecomCustomerChurnFinalDF, curatedCustomerDataDF.customerID == telecomCustomerChurnFinalDF.customer_ID, "inner").drop(telecomCustomerChurnFinalDF.customer_ID).drop(telecomCustomerChurnFinalDF.churn)
consolidatedDataDF.show(truncate=False)
# Persist the augmented telecom tower performance data to GCS
consolidatedDataDF.write.parquet(os.path.join(outputGCSURI, "telco_performance_augmented"), mode = "overwrite")
| # ======================================================================================
# ABOUT
# In this PySpark script, we augment the Telecom data with curated customer data (prior
# job), curate it and persist to GCS
# ======================================================================================
import configparser
from datetime import datetime
import os
import json
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col, substring, lit, when, avg
from pyspark.sql import functions as F
from pyspark.sql.functions import input_file_name
import random
from pyspark.sql.types import *
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, date_format
from pyspark import SparkContext, SparkConf, SQLContext
from google.cloud import storage
import sys
# Parse arguments
sourceBucketNm=sys.argv[1]
# Source data definition
customerCuratedDataDir="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/output_data/customer_augmented/part*"
telcoCustomerChurnDataDir="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/01-datasets/telecom_customer_churn_data.csv"
# Output directory declaration
outputGCSURI="gs://"+sourceBucketNm+"/cell-tower-anomaly-detection/output_data"
# Get or create a Spark session
spark =SparkSession.builder.appName("cell_tower_performance_dataset-exploration").getOrCreate()
# Read the curated customer data (blended with services threshold data) from GCS
curatedCustomerDataDF = spark.read.format("parquet").option("header", True).option("inferschema",True).load(customerCuratedDataDir)
# Read the telecom customer churn data from GCS
telecomCustomerChurnRawDataDF = spark.read.format("csv").option("header", True).option("inferschema",True).load(telcoCustomerChurnDataDir)
telecomCustomerChurnRawDataDF.printSchema()
# Subset the telecom customer churn/performance data for relevant attributes
# ... Create subset
telecomCustomerChurnSubsetDF = telecomCustomerChurnRawDataDF.selectExpr("roam_Mean","change_mou","drop_vce_Mean","drop_dat_Mean","blck_vce_Mean","blck_dat_Mean","plcd_vce_Mean","plcd_dat_Mean","comp_vce_Mean","comp_dat_Mean","peak_vce_Mean","peak_dat_Mean","mou_peav_Mean","mou_pead_Mean","opk_vce_Mean","opk_dat_Mean","mou_opkv_Mean","mou_opkd_Mean","drop_blk_Mean","callfwdv_Mean","callwait_Mean","churn","months","uniqsubs","actvsubs","area","dualband","forgntvl","Customer_ID")
# ... Create a column called customer_ID_Short that is a substring of the original Customer_ID
telecomCustomerChurnFinalDF=telecomCustomerChurnSubsetDF.withColumn('customer_ID_Short', substring('Customer_ID', 4,7))
# ... Rename the Customer_ID column, customer_ID_original
telecomCustomerChurnFinalDF.withColumnRenamed('Customer_ID', 'customer_ID_original')
# ... Rename the newly added customer_ID_Short column, customer_ID
telecomCustomerChurnFinalDF.withColumnRenamed('customer_ID_Short', 'customer_ID')
# ... Quick visual
telecomCustomerChurnFinalDF.show(10,truncate=False)
# Join the curated customer data with the telecom network performance data based on customer ID
consolidatedDataDF = curatedCustomerDataDF.join(telecomCustomerChurnFinalDF, curatedCustomerDataDF.customerID == telecomCustomerChurnFinalDF.customer_ID, "inner").drop(telecomCustomerChurnFinalDF.customer_ID).drop(telecomCustomerChurnFinalDF.churn)
consolidatedDataDF.show(truncate=False)
# Persist the augmented telecom tower performance data to GCS
consolidatedDataDF.write.parquet(os.path.join(outputGCSURI, "telco_performance_augmented"), mode = "overwrite")
| Python | 0 |
ea542282911cbc7b3cf594a20175fbddcbd75a89 | Use absolute import | restapi_logging_handler/__init__.py | restapi_logging_handler/__init__.py | from __future__ import absolute_import
from restapi_logging_handler.loggly_handler import LogglyHandler
from restapi_logging_handler.restapi_logging_handler import RestApiHandler
| from loggly_handler import LogglyHandler
from restapi_logging_handler import RestApiHandler
| Python | 0.000045 |
128c54529da80d5f84a0bf8a9bca6e83ed14a342 | Delete unused import | simplesqlite/loader/html/formatter.py | simplesqlite/loader/html/formatter.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import bs4
import dataproperty
from ..constant import TableNameTemplate as tnt
from ..data import TableData
from ..formatter import TableFormatter
class HtmlTableFormatter(TableFormatter):
def __init__(self, source_data):
super(HtmlTableFormatter, self).__init__(source_data)
try:
self.__soup = bs4.BeautifulSoup(self._source_data, "lxml")
except bs4.FeatureNotFound:
self.__soup = bs4.BeautifulSoup(self._source_data, "html.parser")
def to_table_data(self):
self._validate_source_data()
for table in self.__soup.find_all("table"):
tabledata = self.__parse_html(table)
yield tabledata
def _make_table_name(self):
table_name = self._loader.make_table_name()
key = self.__table_id
if dataproperty.is_empty_string(key):
key = "{:s}{:d}".format(
self._loader.format_name,
self._loader.get_format_table_count())
return table_name.replace(tnt.KEY, key)
def __parse_html(self, table):
header_list = []
data_matrix = []
self.__table_id = table.get("id")
row_list = table.find_all("tr")
for row in row_list:
col_list = row.find_all("td")
if dataproperty.is_empty_sequence(col_list):
th_list = row.find_all("th")
if dataproperty.is_empty_sequence(th_list):
continue
header_list = [row.text.strip() for row in th_list]
continue
data_list = [value.text.strip() for value in col_list]
data_matrix.append(data_list)
self._loader.inc_table_count()
return TableData(
self._make_table_name(), header_list, data_matrix)
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import bs4
import dataproperty
from ..constant import TableNameTemplate as tnt
from ..data import TableData
from ..error import InvalidDataError
from ..formatter import TableFormatter
class HtmlTableFormatter(TableFormatter):
def __init__(self, source_data):
super(HtmlTableFormatter, self).__init__(source_data)
try:
self.__soup = bs4.BeautifulSoup(self._source_data, "lxml")
except bs4.FeatureNotFound:
self.__soup = bs4.BeautifulSoup(self._source_data, "html.parser")
def to_table_data(self):
self._validate_source_data()
for table in self.__soup.find_all("table"):
tabledata = self.__parse_html(table)
yield tabledata
def _make_table_name(self):
table_name = self._loader.make_table_name()
key = self.__table_id
if dataproperty.is_empty_string(key):
key = "{:s}{:d}".format(
self._loader.format_name,
self._loader.get_format_table_count())
return table_name.replace(tnt.KEY, key)
def __parse_html(self, table):
header_list = []
data_matrix = []
self.__table_id = table.get("id")
row_list = table.find_all("tr")
for row in row_list:
col_list = row.find_all("td")
if dataproperty.is_empty_sequence(col_list):
th_list = row.find_all("th")
if dataproperty.is_empty_sequence(th_list):
continue
header_list = [row.text.strip() for row in th_list]
continue
data_list = [value.text.strip() for value in col_list]
data_matrix.append(data_list)
self._loader.inc_table_count()
return TableData(
self._make_table_name(), header_list, data_matrix)
| Python | 0.000001 |
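The reassignments in the new version matter because Spark DataFrames are immutable: withColumnRenamed returns a new frame instead of mutating in place (the old version below discarded that result). A runnable sketch:

# Sketch: DataFrame transformations must be reassigned to take effect.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("rename-demo").getOrCreate()
df = spark.createDataFrame([(1, "a")], ["Customer_ID", "val"])
df2 = df.withColumnRenamed("Customer_ID", "customer_ID_original")
assert "Customer_ID" in df.columns             # original frame unchanged
assert "customer_ID_original" in df2.columns   # rename lives in the new frame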
de69aa9c04ea34bd31b5cf17e8e3cfdf17b0b9df | Naive user/password login w.o. encryption | 1wire/publish.py | 1wire/publish.py | #!/usr/bin/env python
import os
import argparse
import time
import threading
from Queue import Queue
import mosquitto
queue = Queue(100)
def main(host, port, user, password, sensors):
print "#######################"
print "Temperature poller v0.2"
print "#######################"
print "Using sensors:"
pollers = []
for sensor in sensors:
print " {sensor}".format(sensor=sensor)
p = PollerThread(sensor)
p.start()
pollers.append(p)
publisher = PublisherThread(host, port, user=user, password=password)
publisher.start()
try:
raw_input("Press key to exit")
except (KeyboardInterrupt, SystemExit):
pass
finally:
for poller in pollers:
poller.stop()
publisher.stop()
# Make sure publisher is not stuck waiting for data
queue.put(None)
class StoppableThread(threading.Thread):
def __init__(self):
super(StoppableThread, self).__init__()
self._stop = threading.Event()
def stop(self):
self._stop.set()
def is_stopped(self):
return self._stop.isSet()
class PollerThread(StoppableThread):
def __init__(self, sensor):
super(PollerThread, self).__init__()
self.sensor = sensor
self.id = os.path.dirname(sensor)
def run(self):
global queue
while not self.is_stopped():
temp = self.get_temp()
queue.put((self.id, temp))
time.sleep(1)
def get_temp(self):
temp = -1
with open(self.sensor, 'rb') as s:
temp = s.read()
return temp
class PublisherThread(StoppableThread):
def __init__(self, host, port, user=None, password=None):
super(PublisherThread, self).__init__()
self.mqttc = mosquitto.Mosquitto("python_pub")
self.mqttc.will_set("/event/dropped", "Sorry, I seem to have died.")
if user is not None and password is not None:
self.mqttc.username_pw_set(user, password)
self.mqttc.connect(host, port, 60, True)
def run(self):
global queue
while not self.is_stopped():
ret = queue.get()
if ret:
(id, temp) = ret
queue.task_done()
self.mqttc.publish("iot_lab/temp", "{id}:{temp}".format(id=id, temp=temp))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--host', required=True)
    parser.add_argument('-P', '--port', type=int, default=1883)
parser.add_argument('-u', '--user', default=None)
parser.add_argument('-p', '--password', default=None)
parser.add_argument('-s', '--sensors', default=[], action='append', dest="sensors", help='path(s) to sensors, separated by space')
args = parser.parse_args()
main(args.host, args.port, args.user, args.password, args.sensors)
| #!/usr/bin/env python
import os
import argparse
import time
import threading
from Queue import Queue
import mosquitto
queue = Queue(100)
def main(host, port, sensors):
print "#######################"
print "Temperature poller v0.2"
print "#######################"
print "Using sensors:"
pollers = []
for sensor in sensors:
print " {sensor}".format(sensor=sensor)
p = PollerThread(sensor)
p.start()
pollers.append(p)
publisher = PublisherThread(host, port)
publisher.start()
try:
raw_input("Press key to exit")
except (KeyboardInterrupt, SystemExit):
pass
finally:
for poller in pollers:
poller.stop()
publisher.stop()
# Make sure publisher is not stuck waiting for data
queue.put(None)
class StoppableThread(threading.Thread):
def __init__(self):
super(StoppableThread, self).__init__()
self._stop = threading.Event()
def stop(self):
self._stop.set()
def is_stopped(self):
return self._stop.isSet()
class PollerThread(StoppableThread):
def __init__(self, sensor):
super(PollerThread, self).__init__()
self.sensor = sensor
self.id = os.path.dirname(sensor)
def run(self):
global queue
while not self.is_stopped():
temp = self.get_temp()
queue.put((self.id, temp))
time.sleep(1)
def get_temp(self):
temp = -1
with open(self.sensor, 'rb') as s:
temp = s.read()
return temp
class PublisherThread(StoppableThread):
def __init__(self, host, port):
super(PublisherThread, self).__init__()
self.mqttc = mosquitto.Mosquitto("python_pub")
self.mqttc.will_set("/event/dropped", "Sorry, I seem to have died.")
self.mqttc.connect(host, port, 60, True)
def run(self):
global queue
while not self.is_stopped():
ret = queue.get()
if ret:
(id, temp) = ret
queue.task_done()
self.mqttc.publish("iot_lab/temp", "{id}:{temp}".format(id=id, temp=temp))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--host', required=True)
    parser.add_argument('-P', '--port', type=int, default=1883)
parser.add_argument('-s', '--sensors', default=[], action='append', dest="sensors", help='path(s) to sensors, separated by space')
args = parser.parse_args()
main(args.host, args.port, args.sensors)
| Python | 0.99999 |
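The queue.put(None) on shutdown above is the classic sentinel trick for unblocking a consumer parked in queue.get(); condensed:

# Sketch: a sentinel value wakes and stops a blocking consumer.
import threading
from Queue import Queue  # 'queue' on Python 3

q = Queue()

def consumer():
    while True:
        item = q.get()
        if item is None:  # sentinel: exit cleanly
            break
        print("got %s" % item)

t = threading.Thread(target=consumer)
t.start()
q.put("reading-1")
q.put(None)  # unblocks q.get() and terminates the loop
t.join()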
7f02d1f2b23bbf27e99d87ef23c491823875c3d1 | fix bin none subprocess.TimeoutExpired | bin/virt.py | bin/virt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
import subprocess
import os
import sys
def proc(cmd,time = 120,sh = True ):
print("$".format(cmd))
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=sh)
outs, errs = p.communicate(timeout=time)
return outs,errs,p
ROOT_PATH=os.path.dirname(__file__)
print(proc("sudo apt-get update")[0])
print(proc("sudo apt-get install -qq sshpass")[0])
print(proc("ssh-keygen -b 2048 -t rsa -f $HOME/.ssh/id_rsa -q -N \"\"")[0])
print(proc("docker info")[0])
print(proc("docker version")[0])
with open('meta/main.yml', 'r') as f:
doc = yaml.load(f)
for i in doc["galaxy_info"]["platforms"]:
distrib = i["name"]
for x in i["versions"]:
dockerfile = "{}/../dockerfile/{}/{}/Dockerfile".format(ROOT_PATH,distrib,x)
if os.path.exists(dockerfile):
print(proc("docker build -f {} -t {}_{} .".format(dockerfile,distrib,x))[0])
print(proc("docker run -d --cap-add=SYS_ADMIN -it -v /sys/fs/cgroup:/sys/fs/cgroup:ro {}_{}".format(distrib,x))[0])
else:
print("Critical error. Not found docker files {}".format(dockerfile))
sys.exit(1)
proc("sleep 10")
proc("docker inspect --format '{{.Config.Image}} ansible_ssh_host={{.NetworkSettings.IPAddress}}' `docker ps -q` >> /etc/ansible/hosts")
for item in proc("docker inspect --format '{{ .NetworkSettings.IPAddress }}' \`docker ps -q\`")[0]:
proc("ssh-keyscan -H {} >> ~/.ssh/known_hosts".format(item))
proc("sshpass -p '000000' ssh-copy-id root@{}".format(item))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import yaml
import subprocess
import os
import sys
def proc(cmd,time = 120,sh = True ):
print("$".format(cmd))
p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE, shell=sh)
try:
outs, errs = p.communicate(timeout=time)
except subprocess.TimeoutExpired:
p.kill()
outs, errs = p.communicate()
return outs,errs,p
ROOT_PATH=os.path.dirname(__file__)
print(proc("sudo apt-get update")[0])
print(proc("sudo apt-get install -qq sshpass")[0])
print(proc("ssh-keygen -b 2048 -t rsa -f $HOME/.ssh/id_rsa -q -N \"\"")[0])
print(proc("docker info")[0])
print(proc("docker version")[0])
with open('meta/main.yml', 'r') as f:
doc = yaml.load(f)
for i in doc["galaxy_info"]["platforms"]:
distrib = i["name"]
for x in i["versions"]:
dockerfile = "{}/../dockerfile/{}/{}/Dockerfile".format(ROOT_PATH,distrib,x)
if os.path.exists(dockerfile):
print(proc("docker build -f {} -t {}_{} .".format(dockerfile,distrib,x))[0])
print(proc("docker run -d --cap-add=SYS_ADMIN -it -v /sys/fs/cgroup:/sys/fs/cgroup:ro {}_{}".format(distrib,x))[0])
else:
print("Critical error. Not found docker files {}".format(dockerfile))
sys.exit(1)
proc("sleep 10")
proc("docker inspect --format '{{.Config.Image}} ansible_ssh_host={{.NetworkSettings.IPAddress}}' `docker ps -q` >> /etc/ansible/hosts")
for item in proc("docker inspect --format '{{ .NetworkSettings.IPAddress }}' \`docker ps -q\`")[0]:
proc("ssh-keyscan -H {} >> ~/.ssh/known_hosts".format(item))
proc("sshpass -p '000000' ssh-copy-id root@{}".format(item))
| Python | 0.000001 |
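The only difference between the two versions of proc() above is the try/except around communicate(): subprocess.TimeoutExpired exists on Python 3 (or via the subprocess32 backport) but not in the Python 2 standard library, which is presumably what this commit is working around. For reference, a minimal Python 3 sketch of the usual kill-then-drain timeout pattern:

import subprocess

def run_with_timeout(cmd, timeout=120):
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=True)
    try:
        outs, errs = p.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        p.kill()                      # terminate the runaway child...
        outs, errs = p.communicate()  # ...then drain its pipes
    return outs, errs, p.returncode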
45e9ddce96b4fdadca63a50bf2808c7f98520d99 | print query on error | data_upload/util/bq_wrapper.py | data_upload/util/bq_wrapper.py | '''
Created on Jan 22, 2017
Copyright 2017, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from google.cloud import bigquery
def query_bq_table(query, use_legacy, project, log):
log.info('\t\tquerying bq: %s' % query)
client = bigquery.Client(project=project)
query_results = client.run_sync_query(query)
# Use standard SQL syntax for queries.
# See: https://cloud.google.com/bigquery/sql-reference/
query_results.use_legacy_sql = use_legacy
try:
query_results.run()
except:
log.exception('problem with query:\n{}'.format(query))
raise
log.info('\t\tdone querying bq: %s' % query)
return query_results
def fetch_paged_results(query_results, fetch_count, project_name, page_token, log):
log.info('\t\trequesting %d rows %s' % (fetch_count, (' for ' + project_name) if project_name else ''))
rows, total_rows, page_token = query_results.fetch_data(
max_results=fetch_count,
page_token=page_token)
log.info('\t\tfetched %d rows %s' % (len(rows), (' for ' + project_name) if project_name else ''))
return total_rows, rows, page_token
| '''
Created on Jan 22, 2017
Copyright 2017, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from google.cloud import bigquery
def query_bq_table(query, use_legacy, project, log):
log.info('\t\tquerying bq: %s' % query)
client = bigquery.Client(project=project)
query_results = client.run_sync_query(query)
# Use standard SQL syntax for queries.
# See: https://cloud.google.com/bigquery/sql-reference/
query_results.use_legacy_sql = use_legacy
query_results.run()
log.info('\t\tdone querying bq: %s' % query)
return query_results
def fetch_paged_results(query_results, fetch_count, project_name, page_token, log):
log.info('\t\trequesting %d rows %s' % (fetch_count, (' for ' + project_name) if project_name else ''))
rows, total_rows, page_token = query_results.fetch_data(
max_results=fetch_count,
page_token=page_token)
log.info('\t\tfetched %d rows %s' % (len(rows), (' for ' + project_name) if project_name else ''))
return total_rows, rows, page_token
| Python | 0.000446 |
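The change in this row wraps query_results.run() so the offending SQL is logged before the exception propagates; the bare raise preserves the original traceback. The pattern generalizes to any operation whose context you want in the logs; a small sketch:

import logging

log = logging.getLogger(__name__)

def run_logged(operation, context):
    try:
        return operation()
    except Exception:
        log.exception('operation failed, context:\n%s', context)
        raise  # re-raise the original exception unchanged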
d590307b0d59ac7163016197e3de0e8bced377d2 | Fix form typo | account_verification_flask/forms/forms.py | account_verification_flask/forms/forms.py | from flask_wtf import Form
from wtforms import TextField, PasswordField, IntegerField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegisterForm(Form):
name = TextField(
'Tell us your name',
validators = [DataRequired(message = "Name is required"), Length(min = 3,message = "Name must greater than 3 chars")]
)
email = TextField(
'Enter your E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
password = PasswordField(
'Password',
validators = [DataRequired("Password is required")]
)
country_code = TextField(
'Country Code',
validators = [DataRequired("Country code is required"), Length(min = 1, max = 4, message = "Country must be between 1 and 4 chars")]
)
phone_number = IntegerField(
'Phone Number',
validators = [DataRequired("Valid phone number is required")]
)
class ResendCodeForm(Form):
email = TextField(
'E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
class VerifyCodeForm(ResendCodeForm):
verification_code = TextField(
'Verification Code',
validators = [DataRequired("Verification code is required")]
)
| from flask_wtf import Form
from wtforms import TextField, PasswordField, IntegerField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegisterForm(Form):
name = TextField(
'Tell us your name',
validators = [DataRequired(message = "Name is required"), Length(min = 3,message = "Name must greater than 3 chars")]
)
email = TextField(
'Enter your E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
password = PasswordField(
'Password',
validators = [DataRequired("Password is required")]
)
country_code = TextField(
'Coundtry Code',
validators = [DataRequired("Country code is required"), Length(min = 1, max = 4, message = "Country must be between 1 and 4 chars")]
)
phone_number = IntegerField(
'Phone Number',
validators = [DataRequired("Valid phone number is required")]
)
class ResendCodeForm(Form):
email = TextField(
'E-mail',
validators = [DataRequired("E-mail is required"), Email(message = "Invalid E-mail address")]
)
class VerifyCodeForm(ResendCodeForm):
verification_code = TextField(
'Verification Code',
validators = [DataRequired("Verification code is required")]
)
| Python | 0.000191 |
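Both versions of forms.py import EqualTo without using it; it is the stock WTForms validator for confirmation fields. A hedged sketch of how it could extend the RegisterForm above (the confirm field is an invention for illustration, not part of the original form):

from flask_wtf import Form
from wtforms import PasswordField
from wtforms.validators import DataRequired, EqualTo

class RegisterFormWithConfirm(Form):
    password = PasswordField(
        'Password',
        validators=[DataRequired("Password is required")]
    )
    confirm = PasswordField(
        'Repeat Password',
        # 'password' refers to the field above by attribute name
        validators=[EqualTo('password', message="Passwords must match")]
    )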
a1826e5507a083cdcb906c411e97031bb5546eae | Remove debug print | cax/qsub.py | cax/qsub.py | """ Access the cluster.
Easy to use functions to make use of the cluster facilities.
This checks the available slots on the requested queue, creates the
scripts to submit, submits the jobs, and cleans up afterwards.
Example usage::
>>> import qsub
>>> qsub.submit_job('touch /data/hisparc/test', 'job_1', 'express')
"""
import logging
import os
from cax import config
import subprocess
import tempfile
from distutils.spawn import find_executable
def which(program):
"""Check if a command line program is available
An Exception is raised if the program is not available.
:param program: name or program to check for, e.g. 'wget'.
"""
path = find_executable(program)
if not path:
raise Exception('The program %s is not available.' % program)
def submit_job(script, extra=''):
"""Submit a job
:param script: contents of the script to run.
:param name: name for the job.
:param extra: optional extra arguments for the sbatch command.
"""
which('sbatch')
fileobj = create_script(script)
# Effect of the arguments for sbatch:
# http://slurm.schedmd.com/sbatch.html
sbatch = ('sbatch {extra} {script}'
.format(script=fileobj.name,
extra=extra))
try:
result = subprocess.check_output(sbatch,
stderr=subprocess.STDOUT,
shell=True,
timeout=120)
logging.info(result)
except subprocess.TimeoutExpired as e:
logging.error("Process timeout")
except Exception as e:
logging.exception(e)
delete_script(fileobj)
def create_script(script):
"""Create script as temp file to be run on cluster"""
fileobj = tempfile.NamedTemporaryFile(delete=True,
suffix='.sh',
mode='wt',
buffering=1)
fileobj.write(script)
os.chmod(fileobj.name, 0o774)
return fileobj
def delete_script(fileobj):
"""Delete script after submitting to cluster
    :param fileobj: temporary file object for the script; closing it removes the file
"""
fileobj.close()
def get_number_in_queue(host=config.get_hostname(), partition=''):
# print (len(get_queue(host, partition)), host, partition)
return len(get_queue(host, partition))
def get_queue(host=config.get_hostname(), partition=''):
"""Get list of jobs in queue"""
if host == "midway-login1":
args = {'partition': 'sandyb',
'user' : config.get_user()}
elif host == 'tegner-login-1':
args = {'partition': 'main',
'user' : 'bobau'}
else:
return []
if partition == '':
command = 'squeue --user={user} -o "%.30j"'.format(**args)
else:
args['partition'] = partition
command = 'squeue --partition={partition} --user={user} -o "%.30j"'.format(**args)
try:
queue = subprocess.check_output(command,
shell=True,
timeout=120)
except subprocess.TimeoutExpired as e:
logging.error("Process timeout")
return []
except Exception as e:
logging.exception(e)
return []
queue_list = queue.rstrip().decode('ascii').split()
if len(queue_list) > 1:
return queue_list[1:]
return []
| """ Access the cluster.
Easy to use functions to make use of the cluster facilities.
This checks the available slots on the requested queue, creates the
scripts to submit, submits the jobs, and cleans up afterwards.
Example usage::
>>> import qsub
>>> qsub.submit_job('touch /data/hisparc/test', 'job_1', 'express')
"""
import logging
import os
from cax import config
import subprocess
import tempfile
from distutils.spawn import find_executable
def which(program):
"""Check if a command line program is available
An Exception is raised if the program is not available.
:param program: name or program to check for, e.g. 'wget'.
"""
path = find_executable(program)
if not path:
raise Exception('The program %s is not available.' % program)
def submit_job(script, extra=''):
"""Submit a job
:param script: contents of the script to run.
:param name: name for the job.
:param extra: optional extra arguments for the sbatch command.
"""
which('sbatch')
fileobj = create_script(script)
# Effect of the arguments for sbatch:
# http://slurm.schedmd.com/sbatch.html
sbatch = ('sbatch {extra} {script}'
.format(script=fileobj.name,
extra=extra))
try:
result = subprocess.check_output(sbatch,
stderr=subprocess.STDOUT,
shell=True,
timeout=120)
logging.info(result)
except subprocess.TimeoutExpired as e:
logging.error("Process timeout")
except Exception as e:
logging.exception(e)
delete_script(fileobj)
def create_script(script):
"""Create script as temp file to be run on cluster"""
fileobj = tempfile.NamedTemporaryFile(delete=True,
suffix='.sh',
mode='wt',
buffering=1)
fileobj.write(script)
os.chmod(fileobj.name, 0o774)
return fileobj
def delete_script(fileobj):
"""Delete script after submitting to cluster
    :param fileobj: temporary file object for the script; closing it removes the file
"""
fileobj.close()
def get_number_in_queue(host=config.get_hostname(), partition=''):
print (len(get_queue(host, partition)), host, partition)
return len(get_queue(host, partition))
def get_queue(host=config.get_hostname(), partition=''):
"""Get list of jobs in queue"""
if host == "midway-login1":
args = {'partition': 'sandyb',
'user' : config.get_user()}
elif host == 'tegner-login-1':
args = {'partition': 'main',
'user' : 'bobau'}
else:
return []
if partition == '':
command = 'squeue --user={user} -o "%.30j"'.format(**args)
else:
args['partition'] = partition
command = 'squeue --partition={partition} --user={user} -o "%.30j"'.format(**args)
try:
queue = subprocess.check_output(command,
shell=True,
timeout=120)
except subprocess.TimeoutExpired as e:
logging.error("Process timeout")
return []
except Exception as e:
logging.exception(e)
return []
queue_list = queue.rstrip().decode('ascii').split()
if len(queue_list) > 1:
return queue_list[1:]
return []
| Python | 0.000003 |
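submit_job() above only writes the script to a temporary file and hands it to sbatch, so any SLURM directives have to live in the script body itself. A usage sketch, with an illustrative job name (an assumption, not a value from the source; the sandyb partition does appear in the row):

script = '''#!/bin/bash
#SBATCH --job-name=cax-example
#SBATCH --partition=sandyb
#SBATCH --time=01:00:00

echo "processing run"
'''
submit_job(script, extra='--mem=2G')  # extra flags are passed straight to sbatch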
ddaa9a687019794f61c019c5e3139a0b9ceaf521 | enable C++ exception-handling | binding.gyp | binding.gyp | {
"target_defaults": {
"conditions": [
['OS=="win"', {
}, {
'cflags' : [ "-fexceptions" ],
'cflags_cc' : [ "-fexceptions" ]
} ]
],
"configurations": {
"Release": {
'msvs_settings': {
'VCCLCompilerTool': {
'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG
'OmitFramePointers': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'RuntimeTypeInfo': 'false',
'ExceptionHandling': '1',
'AdditionalOptions': [
'/MP', '/EHsc'
]
},
'VCLibrarianTool': {
'AdditionalOptions': [
'/LTCG', # link time code generation
],
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': 1, # link-time code generation
'OptimizeReferences': 2, # /OPT:REF
'EnableCOMDATFolding': 2, # /OPT:ICF
'LinkIncremental': 1, # disable incremental linking
}
}
}
}
},
"targets": [
{
"target_name": "inchi",
"msvs_guid": "F1B917E2-75AB-A243-6D62-3C7938A1EF68",
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"dependencies": [
"deps/inchi/inchi.gyp:libINCHIAPI"
],
"sources": [
"src/node-inchi.cc",
"src/molecule.cc",
"src/atom.cc",
"src/molecule_wrap.cc",
"src/molecule_native.cc",
"src/inchi_atom.cc",
"src/get_inchi_data.cc",
"src/get_struct_from_inchi_data.cc",
"src/get_inchi.cc"
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc' : [
"-fexceptions"
]
} ]
],
},
{
"target_name": "test",
"type": "executable",
"sources": [
"src/test/TestMain.cc",
"src/test/hello.cc",
"src/test/test_molecule.cc",
"src/test/test_inchi_atom.cc",
"src/test/test_get_struct_from_inchi.cc",
"src/molecule_native.cc",
"src/get_inchi_data.cc",
"src/get_struct_from_inchi_data.cc",
"src/inchi_atom.cc"
],
"include_dirs": [
".",
"src",
"<!(node -e \"require('cppunitlite')\")",
"<!(node -e \"require('nan')\")"
],
"dependencies": [
"node_modules/cppunitlite/binding.gyp:CppUnitLite",
"deps/inchi/inchi.gyp:libINCHIAPI"
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc': [ '-fexceptions' ]
}
]
],
# sample unit test
}
]
}
| {
"target_defaults": {
"conditions": [
['OS=="win"', {
}, {
'cflags' : [ "-fexceptions" ],
'cflags_cc' : [ "-fexceptions" ]
} ]
],
"configurations": {
"Release": {
'msvs_settings': {
'VCCLCompilerTool': {
'WholeProgramOptimization': 'true', # /GL, whole program optimization, needed for LTCG
'OmitFramePointers': 'true',
'EnableFunctionLevelLinking': 'true',
'EnableIntrinsicFunctions': 'true',
'RuntimeTypeInfo': 'false',
'ExceptionHandling': '1',
'AdditionalOptions': [
'/MP'
]
},
'VCLibrarianTool': {
'AdditionalOptions': [
'/LTCG', # link time code generation
],
},
'VCLinkerTool': {
'LinkTimeCodeGeneration': 1, # link-time code generation
'OptimizeReferences': 2, # /OPT:REF
'EnableCOMDATFolding': 2, # /OPT:ICF
'LinkIncremental': 1, # disable incremental linking
}
}
}
}
},
"targets": [
{
"target_name": "inchi",
"msvs_guid": "F1B917E2-75AB-A243-6D62-3C7938A1EF68",
"include_dirs": [
"<!(node -e \"require('nan')\")"
],
"dependencies": [
"deps/inchi/inchi.gyp:libINCHIAPI"
],
"sources": [
"src/node-inchi.cc",
"src/molecule.cc",
"src/atom.cc",
"src/molecule_wrap.cc",
"src/molecule_native.cc",
"src/inchi_atom.cc",
"src/get_inchi_data.cc",
"src/get_struct_from_inchi_data.cc",
"src/get_inchi.cc"
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc' : [
"-fexceptions"
]
} ]
],
},
{
"target_name": "test",
"type": "executable",
"sources": [
"src/test/TestMain.cc",
"src/test/hello.cc",
"src/test/test_molecule.cc",
"src/test/test_inchi_atom.cc",
"src/test/test_get_struct_from_inchi.cc",
"src/molecule_native.cc",
"src/get_inchi_data.cc",
"src/get_struct_from_inchi_data.cc",
"src/inchi_atom.cc"
],
"include_dirs": [
".",
"src",
"<!(node -e \"require('cppunitlite')\")",
"<!(node -e \"require('nan')\")"
],
"dependencies": [
"node_modules/cppunitlite/binding.gyp:CppUnitLite",
"deps/inchi/inchi.gyp:libINCHIAPI"
],
"conditions": [
['OS=="win"', {
}, {
'cflags_cc': [ '-fexceptions' ]
}
]
],
# sample unit test
}
]
}
| Python | 0.000012 |
7998477a627a78b83f96894e72ec2f121c4b9606 | Update binding.gyp | binding.gyp | binding.gyp | {
"targets": [
{
'target_name': 'LDAP',
'sources': [
'src/LDAP.cc'
],
'include_dirs': [
'/usr/local/include'
],
'defines': [
'LDAP_DEPRECATED'
],
'cflags': [
'-Wall',
'-g'
],
'libraries': [
'-llber -lldap'
],
'ldflags': [
'-L/usr/local/lib'
],
'conditions': [
['OS=="linux"', {
'ldflags': [
'-luuid'
]
}
],
['OS=="mac"', {
"link_settings": {
"libraries": [
"-lldap"
]
}
}
]
]
}
]
}
| {
"targets": [
{
'target_name': 'LDAP',
'sources': [
'src/LDAP.cc'
],
'include_dirs': [
'/usr/local/include'
],
'defines': [
'LDAP_DEPRECATED'
],
'cflags': [
'-Wall',
'-g'
],
'ldflags': [
'-L/usr/local/lib',
'-lldap'
],
'conditions': [
['OS=="linux"', {
'ldflags': [
'-luuid'
]
}
],
['OS=="mac"', {
"link_settings": {
"libraries": [
"-lldap"
]
}
}
]
]
}
]
}
| Python | 0 |
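The fix in this LDAP row moves -lldap out of ldflags and into a libraries list. That distinction matters in gyp: ldflags are emitted before the object files on the link line, where -l entries can be discarded by a left-to-right linker, while libraries entries land after the objects. A minimal sketch of the intended shape (target name and paths are illustrative):

{
  'targets': [
    {
      'target_name': 'example',
      'sources': ['src/example.cc'],
      'ldflags': ['-L/usr/local/lib'],    # search paths and plain linker flags
      'libraries': ['-lldap', '-llber']   # libraries to link, kept after the objects
    }
  ]
}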
996c8d4e9a65f411341f0c5f349ff3788cca0209 | Use unittest assertions. | rinse/tests/test_client.py | rinse/tests/test_client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Unit tests for rinse.client module."""
import unittest
from lxml import etree
from mock import MagicMock, patch
from rinse.client import SoapClient
from rinse.message import SoapMessage
from .utils import captured_stdout
class TestSoapMessage(unittest.TestCase):
def test_soap_action(self):
"""Test that SOAP-Action HTTP header is set correctly."""
msg = SoapMessage(etree.Element('test'))
req = msg.request('http://example.com', 'testaction')
self.assertEqual(req.headers['SOAPAction'], 'testaction')
def test_no_soap_action(self):
"""Test that SOAP-Action HTTP header is absent when no action given.
"""
msg = SoapMessage(etree.Element('test'))
req = msg.request('http://example.com')
self.assertTrue('SOAPAction' not in req.headers)
def test_soap_action_is_none(self):
"""Test that SOAP-Action HTTP header is absent when no action is None.
"""
msg = SoapMessage(etree.Element('test'))
req = msg.request('http://example.com', None)
self.assertTrue('SOAPAction' not in req.headers)
self.assertTrue(msg.etree())
class TestRinseClient(unittest.TestCase):
def test_soap_action(self):
"""Test that SOAP action is passed on to SoapMessage.request()."""
msg = SoapMessage(etree.Element('test'))
msg.request = MagicMock()
with patch('requests.Session'):
client = SoapClient('http://example.com')
client(msg, 'testaction', build_response=lambda r: r)
msg.request.assert_called_once_with('http://example.com',
'testaction')
def test_soap_action_debug(self):
msg = SoapMessage(etree.Element('test'))
client = SoapClient('http://example.com', debug=True)
client._session = MagicMock()
with captured_stdout() as stdout:
client(msg, 'testaction', build_response=lambda r: r)
self.assertEqual(
stdout.getvalue(),
'POST http://example.com\n'
'Content-Length: 164\n'
'Content-Type: text/xml;charset=UTF-8\n'
'SOAPAction: testaction\n'
'\n'
'<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">\n'
' <soapenv:Header/>\n'
' <soapenv:Body>\n'
' <test/>\n'
' </soapenv:Body>\n'
'</soapenv:Envelope>\n'
'\n'
)
def test_no_soap_action(self):
"""Test that empty SOAP action is passed to SoapMessage.request()
when no action given."""
msg = SoapMessage(etree.Element('test'))
msg.request = MagicMock()
with patch('requests.Session'):
client = SoapClient('http://example.com')
client(msg, build_response=lambda r: r)
msg.request.assert_called_once_with('http://example.com', '')
def test_timeout(self):
msg = SoapMessage(etree.Element('test'))
msg.request = MagicMock()
client = SoapClient('http://example.com', timeout=1)
self.assertEqual(client.timeout, 1)
with patch('requests.Session'):
client(msg, 'testaction', build_response=lambda r: r)
self.assertEqual(client._session.send.call_args[1]['timeout'], 1)
with patch('requests.Session'):
client(msg, 'testaction', build_response=lambda r: r, timeout=2)
self.assertEqual(client._session.send.call_args[1]['timeout'], 2)
with patch('requests.Session'):
client(msg, 'testaction', build_response=lambda r: r, timeout=None)
self.assertEqual(client._session.send.call_args[1]['timeout'], None)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Unit tests for rinse.client module."""
import unittest
from lxml import etree
from mock import MagicMock, patch
from rinse.client import SoapClient
from rinse.message import SoapMessage
from .utils import captured_stdout
class TestSoapMessage(unittest.TestCase):
def test_soap_action(self):
"""Test that SOAP-Action HTTP header is set correctly."""
msg = SoapMessage(etree.Element('test'))
req = msg.request('http://example.com', 'testaction')
self.assertEqual(req.headers['SOAPAction'], 'testaction')
def test_no_soap_action(self):
"""Test that SOAP-Action HTTP header is absent when no action given.
"""
msg = SoapMessage(etree.Element('test'))
req = msg.request('http://example.com')
self.assertTrue('SOAPAction' not in req.headers)
def test_soap_action_is_none(self):
"""Test that SOAP-Action HTTP header is absent when no action is None.
"""
msg = SoapMessage(etree.Element('test'))
req = msg.request('http://example.com', None)
self.assertTrue('SOAPAction' not in req.headers)
class TestRinseClient(unittest.TestCase):
def test_soap_action(self):
"""Test that SOAP action is passed on to SoapMessage.request()."""
msg = SoapMessage(etree.Element('test'))
msg.request = MagicMock()
with patch('requests.Session'):
client = SoapClient('http://example.com')
client(msg, 'testaction', build_response=lambda r: r)
msg.request.assert_called_once_with('http://example.com',
'testaction')
def test_soap_action_debug(self):
msg = SoapMessage(etree.Element('test'))
client = SoapClient('http://example.com', debug=True)
client._session = MagicMock()
with captured_stdout() as stdout:
client(msg, 'testaction', build_response=lambda r: r)
self.assertEqual(
stdout.getvalue(),
'POST http://example.com\n'
'Content-Length: 164\n'
'Content-Type: text/xml;charset=UTF-8\n'
'SOAPAction: testaction\n'
'\n'
'<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">\n'
' <soapenv:Header/>\n'
' <soapenv:Body>\n'
' <test/>\n'
' </soapenv:Body>\n'
'</soapenv:Envelope>\n'
'\n'
)
def test_no_soap_action(self):
"""Test that empty SOAP action is passed to SoapMessage.request()
when no action given."""
msg = SoapMessage(etree.Element('test'))
msg.request = MagicMock()
with patch('requests.Session'):
client = SoapClient('http://example.com')
client(msg, build_response=lambda r: r)
msg.request.assert_called_once_with('http://example.com', '')
def test_timeout(self):
msg = SoapMessage(etree.Element('test'))
msg.request = MagicMock()
client = SoapClient('http://example.com', timeout=1)
assert client.timeout == 1
with patch('requests.Session'):
client(msg, 'testaction', build_response=lambda r: r)
assert client._session.send.call_args[1]['timeout'] == 1
with patch('requests.Session'):
client(msg, 'testaction', build_response=lambda r: r, timeout=2)
assert client._session.send.call_args[1]['timeout'] == 2
with patch('requests.Session'):
client(msg, 'testaction', build_response=lambda r: r, timeout=None)
assert client._session.send.call_args[1]['timeout'] is None
if __name__ == '__main__':
unittest.main()
| Python | 0 |
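captured_stdout comes from a local .utils test helper that is not shown in this row. On Python 3 it can be implemented in a few lines with contextlib.redirect_stdout; a sketch, assuming the helper does nothing more than buffer stdout:

import contextlib
import io

@contextlib.contextmanager
def captured_stdout():
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        yield buf  # callers read the output via buf.getvalue()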
182c2ea095dc5207b6d66ce1ad8e2ad2dc986da2 | Fix test skipper | skimage/_shared/tests/test_testing.py | skimage/_shared/tests/test_testing.py | """ Testing decorators module
"""
import numpy as np
from nose.tools import (assert_true, assert_raises, assert_equal)
from skimage._shared.testing import doctest_skip_parser, test_parallel
def test_skipper():
def f():
pass
class c():
def __init__(self):
self.me = "I think, therefore..."
docstring = \
""" Header
>>> something # skip if not HAVE_AMODULE
>>> something + else
>>> a = 1 # skip if not HAVE_BMODULE
>>> something2 # skip if HAVE_AMODULE
"""
f.__doc__ = docstring
c.__doc__ = docstring
global HAVE_AMODULE, HAVE_BMODULE
HAVE_AMODULE = False
HAVE_BMODULE = True
f2 = doctest_skip_parser(f)
c2 = doctest_skip_parser(c)
assert_true(f is f2)
assert_true(c is c2)
expected = \
""" Header
>>> something # doctest: +SKIP
>>> something + else
>>> a = 1
>>> something2
"""
assert_equal(f2.__doc__, expected)
assert_equal(c2.__doc__, expected)
HAVE_AMODULE = True
HAVE_BMODULE = False
f.__doc__ = docstring
c.__doc__ = docstring
f2 = doctest_skip_parser(f)
c2 = doctest_skip_parser(c)
assert_true(f is f2)
expected = \
""" Header
>>> something
>>> something + else
>>> a = 1 # doctest: +SKIP
>>> something2 # doctest: +SKIP
"""
assert_equal(f2.__doc__, expected)
assert_equal(c2.__doc__, expected)
del HAVE_AMODULE
f.__doc__ = docstring
c.__doc__ = docstring
assert_raises(NameError, doctest_skip_parser, f)
assert_raises(NameError, doctest_skip_parser, c)
def test_test_parallel():
state = []
@test_parallel()
def change_state1():
state.append(None)
change_state1()
assert len(state) == 2
@test_parallel(num_threads=1)
def change_state2():
state.append(None)
change_state2()
assert len(state) == 3
@test_parallel(num_threads=3)
def change_state3():
state.append(None)
change_state3()
assert len(state) == 6
if __name__ == '__main__':
np.testing.run_module_suite()
| """ Testing decorators module
"""
import numpy as np
from nose.tools import (assert_true, assert_raises, assert_equal)
from skimage._shared.testing import doctest_skip_parser, test_parallel
def test_skipper():
def f():
pass
class c():
def __init__(self):
self.me = "I think, therefore..."
docstring = \
""" Header
>>> something # skip if not HAVE_AMODULE
>>> something + else
>>> a = 1 # skip if not HAVE_BMODULE
>>> something2 # skip if HAVE_AMODULE
"""
f.__doc__ = docstring
c.__doc__ = docstring
global HAVE_AMODULE, HAVE_BMODULE
HAVE_AMODULE = False
HAVE_BMODULE = True
f2 = doctest_skip_parser(f)
c2 = doctest_skip_parser(c)
assert_true(f is f2)
assert_true(c is c2)
assert_equal(f2.__doc__,
""" Header
>>> something # doctest: +SKIP
>>> something + else
>>> a = 1
>>> something2
""")
assert_equal(c2.__doc__,
""" Header
>>> something # doctest: +SKIP
>>> something + else
>>> a = 1
>>> something2
""")
HAVE_AMODULE = True
HAVE_BMODULE = False
f.__doc__ = docstring
c.__doc__ = docstring
f2 = doctest_skip_parser(f)
c2 = doctest_skip_parser(c)
assert_true(f is f2)
assert_equal(f2.__doc__,
""" Header
>>> something
>>> something + else
>>> a = 1 # doctest: +SKIP
>>> something2 # doctest: +SKIP
""")
assert_equal(c2.__doc__,
""" Header
>>> something
>>> something + else
>>> a = 1 # doctest: +SKIP
>>> something2 # doctest: +SKIP
""")
del HAVE_AMODULE
f.__doc__ = docstring
c.__doc__ = docstring
assert_raises(NameError, doctest_skip_parser, f)
assert_raises(NameError, doctest_skip_parser, c)
def test_test_parallel():
state = []
@test_parallel()
def change_state1():
state.append(None)
change_state1()
assert len(state) == 2
@test_parallel(num_threads=1)
def change_state2():
state.append(None)
change_state2()
assert len(state) == 3
@test_parallel(num_threads=3)
def change_state3():
state.append(None)
change_state3()
assert len(state) == 6
if __name__ == '__main__':
np.testing.run_module_suite()
| Python | 0.000003 |
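The assertions in test_test_parallel pin down the decorator's contract: the function body runs num_threads times in total (2 by default), which is why the list grows to 2, 3 and then 6. A sketch consistent with those counts, assuming the real helper in skimage._shared.testing behaves the same way:

import functools
import threading

def test_parallel(num_threads=2):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # num_threads - 1 workers, plus one run in the calling thread
            workers = [threading.Thread(target=func, args=args, kwargs=kwargs)
                       for _ in range(num_threads - 1)]
            for w in workers:
                w.start()
            func(*args, **kwargs)
            for w in workers:
                w.join()
        return wrapper
    return decorator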
01bee58d06a2af3f94e3a1be954abd845da52ba1 | Update language.py | language_detection/language.py | language_detection/language.py | import pickle
import os
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import pairwise
class language_detection:
def __init__(self):
# ''' Constructor for this class. '''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.svd=pickle.load(open(__location__+"//data//svd.MODEL",'rb'))
self.vocabulary=pickle.load(open(__location__+"//data//lexicon.MODEL",'rb'))
self.id_of_languages=pickle.load(open(__location__+"//data//language_id.MODEL",'rb'))
self.svdmatrix=pickle.load(open(__location__+"//data//svdmatrix.MODEL",'rb'))
def language_list(self):
lan_list=[]
for item in self.id_of_languages.keys():
lan_list.append((self.id_of_languages[item]['id'],self.id_of_languages[item]['name']))
return lan_list
def index_finder(self,text):
clean_text=self.clean(text)
n_gram=self.ngram_extractor(clean_text)
matrix=self.ngram_to_matrix(n_gram)
svd_matrix=self.svd_transform(matrix)
index=self.detect_similarity(svd_matrix)
return index
def language_id(self,text):
index=self.index_finder(text)
print "The Language ID is: "+self.id_of_languages[index]["id"]
return self.id_of_languages[index]["id"]
def language_name(self,text):
index=self.index_finder(text)
print "The Language Name is: "+self.id_of_languages[index]["name"]
return self.id_of_languages[index]["name"]
def clean(self,text):
try:
clean_text=text.decode("utf8").lower()
except:
clean_text=text.lower()
clean_text=clean_text.replace(" ","")
return clean_text
def ngram_extractor(self,text):
n_gram_list=[]
for i in range(len(text)-3):
n_gram_list.append(text[i:i+4])
return list(set(n_gram_list))
def ngram_to_matrix(self,n_gram):
matrix=[0]*len(self.vocabulary)
for gram in n_gram:
try:
position=self.vocabulary[gram]
matrix[position]=1
except:
pass
return matrix
def svd_transform(self,matrix):
return self.svd.transform([matrix])
def detect_similarity(self,svd_matrix):
ind=0
max_sim=-1
sim=pairwise.cosine_similarity(self.svdmatrix,svd_matrix)
for i in range(len(sim)):
if(sim[i]>max_sim):
max_sim=sim[i]
ind=i
return ind
| import pickle
import os
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import pairwise
class language_detection:
def __init__(self):
# ''' Constructor for this class. '''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
self.svd=pickle.load(open(__location__+"//data//svd.MODEL",'rb'))
self.vocabulary=pickle.load(open(__location__+"//data//lexicon.MODEL",'rb'))
self.id_of_languages=pickle.load(open(__location__+"//data//language_id.MODEL",'rb'))
self.svdmatrix=pickle.load(open(__location__+"//data//svdmatrix.MODEL",'rb'))
def language_list(self):
lan_list=[]
for item in self.id_of_languages.keys():
lan_list.append((self.id_of_languages[item]['id'],self.id_of_languages[item]['name']))
return lan_list
def index_finder(self,text):
clean_text=self.clean(text)
n_gram=self.ngram_extractor(clean_text)
matrix=self.ngram_to_matrix(n_gram)
svd_matrix=self.svd_transform(matrix)
index=self.detect_similarity(svd_matrix)
return index
def langauge_id(self,text):
index=self.index_finder(text)
print "The Language ID is: "+self.id_of_languages[index]["id"]
return self.id_of_languages[index]["id"]
def langauge_name(self,text):
index=self.index_finder(text)
print "The Language Name is: "+self.id_of_languages[index]["name"]
return self.id_of_languages[index]["name"]
def clean(self,text):
try:
clean_text=text.decode("utf8").lower()
except:
clean_text=text.lower()
clean_text=clean_text.replace(" ","")
return clean_text
def ngram_extractor(self,text):
n_gram_list=[]
for i in range(len(text)-3):
n_gram_list.append(text[i:i+4])
return list(set(n_gram_list))
def ngram_to_matrix(self,n_gram):
matrix=[0]*len(self.vocabulary)
for gram in n_gram:
try:
position=self.vocabulary[gram]
matrix[position]=1
except:
pass
return matrix
def svd_transform(self,matrix):
return self.svd.transform([matrix])
def detect_similarity(self,svd_matrix):
ind=0
max_sim=-1
sim=pairwise.cosine_similarity(self.svdmatrix,svd_matrix)
for i in range(len(sim)):
if(sim[i]>max_sim):
max_sim=sim[i]
ind=i
return ind
| Python | 0.000001 |
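detect_similarity above walks the similarity column with a manual loop; since pairwise.cosine_similarity already returns a numpy array, the index of the best match is a single argmax call. An equivalent sketch:

import numpy as np

def detect_similarity(self, svd_matrix):
    sim = pairwise.cosine_similarity(self.svdmatrix, svd_matrix)
    return int(np.argmax(sim))  # row index of the most similar language profile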
2a65b1715e469e11ed73faf7f3446f81c836c42e | Add fetch domain logic | handler/domain.py | handler/domain.py | #!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
# Domain Page
from BaseHandler import BaseHandler
from tornado.web import authenticated as Auth
from model.models import Domain, Groups, Record
class IndexHandler(BaseHandler):
@Auth
def get(self):
page = int(self.get_argument('page', 1))
line = int(self.get_argument('line', 20))
offset = (page - 1) * line
data = self.db.query(Domain).offset(offset).limit(line).all()
self.render('domain/index.html',data=data)
class GroupHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/group.html')
class RecordHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/record.html')
| #!/usr/bin/python
# -*- coding:utf-8 -*-
# Powered By KK Studio
# Domain Page
from BaseHandler import BaseHandler
from tornado.web import authenticated as Auth
class IndexHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/index.html')
class GroupHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/group.html')
class RecordHandler(BaseHandler):
@Auth
def get(self):
self.render('domain/record.html')
| Python | 0.000001 |
c7412824c1e9edb7c386f111ce30b5d76952f861 | Remove 'reviews' from Context API return | mendel/serializers.py | mendel/serializers.py | from .models import Keyword, Category, Document, Context, Review, User
from rest_auth.models import TokenModel
from rest_framework import serializers
class KeywordSerializer(serializers.ModelSerializer):
class Meta:
model = Keyword
fields = ('id', 'name', 'definition')
def create(self, validated_data):
instance, _ = Keyword.objects.get_or_create(**validated_data)
return instance
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id', 'name', 'description')
class ContextSerializer(serializers.ModelSerializer):
keyword_given = KeywordSerializer()
user_reviews = serializers.SerializerMethodField('get_reviews')
def get_reviews(self, obj):
results = Review.objects.filter(user=self.context['request'].user)
return ReviewSerializer(results, many=True).data
class Meta:
model = Context
fields = ('id', 'position_from', 'position_to', 'text', 'document', 'keyword_given', 'next_context_id', 'prev_context_id', 'user_reviews')
depth = 1
class DocumentSerializer(serializers.ModelSerializer):
class Meta:
model = Document
fields = ('__all__')
class ReviewSerializer(serializers.ModelSerializer):
class Meta:
model = Review
fields = ('__all__')
class UserSerializer(serializers.ModelSerializer):
last_context_id = serializers.SerializerMethodField('return_last_context_id')
def return_last_context_id(self, user):
try:
return Review.objects.filter(user=user.id).latest('created').context.id
except:
return Context.objects.first().id if Context.objects.first() else None
class Meta:
model = User
fields = ('id', 'username', 'is_staff', 'last_context_id')
class TokenSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = TokenModel
fields = ('key','user',)
depth = 1
| from .models import Keyword, Category, Document, Context, Review, User
from rest_auth.models import TokenModel
from rest_framework import serializers
class KeywordSerializer(serializers.ModelSerializer):
class Meta:
model = Keyword
fields = ('id', 'name', 'definition')
def create(self, validated_data):
instance, _ = Keyword.objects.get_or_create(**validated_data)
return instance
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = ('id', 'name', 'description')
class ContextSerializer(serializers.ModelSerializer):
keyword_given = KeywordSerializer()
user_reviews = serializers.SerializerMethodField('get_reviews')
def get_reviews(self, obj):
results = Review.objects.filter(user=self.context['request'].user)
return ReviewSerializer(results, many=True).data
class Meta:
model = Context
fields = ('id', 'position_from', 'position_to', 'text', 'document', 'keyword_given', 'next_context_id', 'prev_context_id', 'reviews', 'user_reviews')
depth = 1
class DocumentSerializer(serializers.ModelSerializer):
class Meta:
model = Document
fields = ('__all__')
class ReviewSerializer(serializers.ModelSerializer):
class Meta:
model = Review
fields = ('__all__')
class UserSerializer(serializers.ModelSerializer):
last_context_id = serializers.SerializerMethodField('return_last_context_id')
def return_last_context_id(self, user):
try:
return Review.objects.filter(user=user.id).latest('created').context.id
except:
return Context.objects.first().id if Context.objects.first() else None
class Meta:
model = User
fields = ('id', 'username', 'is_staff', 'last_context_id')
class TokenSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = TokenModel
fields = ('key','user',)
depth = 1
| Python | 0.000006 |
f471441bde9940e46badd0ec506c18e8587de004 | Optimize the rebuild admin | metaci/build/admin.py | metaci/build/admin.py | from django.contrib import admin
from metaci.build.models import Build
from metaci.build.models import BuildFlow
from metaci.build.models import FlowTask
from metaci.build.models import Rebuild
class BuildAdmin(admin.ModelAdmin):
list_display = (
'repo',
'plan',
'branch',
'commit',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('repo', 'plan')
list_select_related = ('branch','repo','plan')
raw_id_fields = ('branch', 'plan', 'repo', 'org', 'org_instance', 'current_rebuild')
admin.site.register(Build, BuildAdmin)
class BuildFlowAdmin(admin.ModelAdmin):
list_display = (
'build',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build')
admin.site.register(BuildFlow, BuildFlowAdmin)
class FlowTaskAdmin(admin.ModelAdmin):
list_display = ('id', 'build_flow', 'stepnum', 'path', 'status')
list_filter = ('build_flow__build__repo',)
raw_id_fields = ['build_flow']
admin.site.register(FlowTask, FlowTaskAdmin)
class RebuildAdmin(admin.ModelAdmin):
list_display = (
'build',
'user',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build__plan')
raw_id_fields = ('build', 'org_instance')
admin.site.register(Rebuild, RebuildAdmin)
| from django.contrib import admin
from metaci.build.models import Build
from metaci.build.models import BuildFlow
from metaci.build.models import FlowTask
from metaci.build.models import Rebuild
class BuildAdmin(admin.ModelAdmin):
list_display = (
'repo',
'plan',
'branch',
'commit',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('repo', 'plan')
list_select_related = ('branch','repo','plan')
raw_id_fields = ('branch', 'plan', 'repo', 'org', 'org_instance', 'current_rebuild')
admin.site.register(Build, BuildAdmin)
class BuildFlowAdmin(admin.ModelAdmin):
list_display = (
'build',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build')
admin.site.register(BuildFlow, BuildFlowAdmin)
class FlowTaskAdmin(admin.ModelAdmin):
list_display = ('id', 'build_flow', 'stepnum', 'path', 'status')
list_filter = ('build_flow__build__repo',)
raw_id_fields = ['build_flow']
admin.site.register(FlowTask, FlowTaskAdmin)
class RebuildAdmin(admin.ModelAdmin):
list_display = (
'build',
'user',
'status',
'time_queue',
'time_start',
'time_end',
)
list_filter = ('build__repo', 'build')
admin.site.register(Rebuild, RebuildAdmin)
| Python | 0.000003 |
206f76026504219ed52f2fcca1b6b64b78bdcf21 | Add some print statements | software/lightpowertool/csv_export.py | software/lightpowertool/csv_export.py | import csv
class CSVExport(object):
"""docstring for CSVExport"""
def __init__(self, filename):
super(CSVExport, self).__init__()
self._filename = filename
def export_data(self, data):
print("Beginning exportation of data...")
with open(self._filename, "w", newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for single_data in data:
csvwriter.writerow(list(single_data))
print("Exportation has been done!")
| import csv
class CSVExport(object):
"""docstring for CSVExport"""
def __init__(self, filename):
super(CSVExport, self).__init__()
self._filename = filename
def export_data(self, data):
with open(self._filename, "w", newline='') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',')
for single_data in data:
csvwriter.writerow(list(single_data))
| Python | 0.006669 |
3121a02c6174a31b64974d57a3ec2d7df760a7ae | Add a legislative reference to the incapacity rate | openfisca_france/model/caracteristiques_socio_demographiques/capacite_travail.py | openfisca_france/model/caracteristiques_socio_demographiques/capacite_travail.py | # -*- coding: utf-8 -*-
from openfisca_france.model.base import *
class taux_capacite_travail(Variable):
value_type = float
default_value = 1.0
entity = Individu
label = u"Taux de capacité de travail, appréciée par la commission des droits et de l'autonomie des personnes handicapées (CDAPH)"
definition_period = MONTH
class taux_incapacite(Variable):
value_type = float
entity = Individu
label = u"Taux d'incapacité"
definition_period = MONTH
reference = "https://www.legifrance.gouv.fr/affichCodeArticle.do;jsessionid=BD54F4B28313142C87FC8B96013E0441.tplgfr44s_1?idArticle=LEGIARTI000023097719&cidTexte=LEGITEXT000006073189&dateTexte=20190312"
documentation = "Taux d'incapacité retenu pour l'Allocation Adulte Handicapé (AAH)."
| # -*- coding: utf-8 -*-
from openfisca_france.model.base import *
class taux_capacite_travail(Variable):
value_type = float
default_value = 1.0
entity = Individu
label = u"Taux de capacité de travail, appréciée par la commission des droits et de l'autonomie des personnes handicapées (CDAPH)"
definition_period = MONTH
class taux_incapacite(Variable):
value_type = float
entity = Individu
label = u"Taux d'incapacité"
definition_period = MONTH
| Python | 0.000012 |
a279cb4340c6da5ed64b39660cfcb5ef53d0bb74 | Fix test | tests/core/test_node.py | tests/core/test_node.py | from common import auth_check
def test_node_fields(mc):
cclient = mc.client
fields = {
'nodeTaints': 'r',
'nodeLabels': 'r',
'nodeAnnotations': 'r',
'namespaceId': 'cr',
'conditions': 'r',
'allocatable': 'r',
'capacity': 'r',
'hostname': 'r',
'info': 'r',
'ipAddress': 'r',
'limits': 'r',
'nodePoolUuid': 'r',
'nodeName': 'r',
'requested': 'r',
'clusterId': 'cr',
'etcd': 'cr',
'controlPlane': 'cr',
'worker': 'cr',
'requestedHostname': 'cr',
'volumesAttached': 'r',
'nodeTemplateId': 'cr',
'volumesInUse': 'r',
'podCidr': 'r',
'name': 'cru',
'taints': 'ru',
'unschedulable': 'ru',
'providerId': 'r',
'sshUser': 'r',
'imported': "cru",
}
for name, field in cclient.schema.types['node'].resourceFields.items():
if name.endswith("Config"):
fields[name] = 'cr'
fields['customConfig'] = 'cru'
auth_check(cclient.schema, 'node', 'crud', fields)
| from common import auth_check
def test_node_fields(mc):
cclient = mc.client
fields = {
'nodeTaints': 'r',
'nodeLabels': 'r',
'nodeAnnotations': 'r',
'namespaceId': 'cr',
'conditions': 'r',
'allocatable': 'r',
'capacity': 'r',
'hostname': 'r',
'info': 'r',
'ipAddress': 'r',
'limits': 'r',
'nodePoolUuid': 'r',
'nodeName': 'r',
'requested': 'r',
'clusterId': 'cr',
'etcd': 'cru',
'controlPlane': 'cru',
'worker': 'cru',
'requestedHostname': 'cr',
'volumesAttached': 'r',
'nodeTemplateId': 'cr',
'volumesInUse': 'r',
'podCidr': 'r',
'name': 'cru',
'taints': 'ru',
'unschedulable': 'ru',
'providerId': 'r',
'sshUser': 'r',
'imported': "cru",
}
for name, field in cclient.schema.types['node'].resourceFields.items():
if name.endswith("Config"):
fields[name] = 'cr'
fields['customConfig'] = 'cru'
auth_check(cclient.schema, 'node', 'crud', fields)
| Python | 0.000004 |
7b67ae8910b90dda49d370dd95fb5969a9a5d16b | Fix restrict_quadrants and add all 4 corners | cell.py | cell.py | from __future__ import division
import collections
import itertools
import math
Spoke = collections.namedtuple('Spoke', 'start, end')
def get_neighbours(cell, include_self=False):
"""
Get 8 neighbouring cell coords to a start cell.
If `include_self` is True, returns the current (center) cell as well.
"""
offsets = list(itertools.product([0, 1, -1], repeat=2))
if not include_self:
del offsets[offsets.index((0, 0))] # Don't include start cell
return [(cell[0] + dx, cell[1] + dy) for dx, dy in offsets]
def restrict_quadrants(neighbours, start, end):
cells = neighbours[:]
if end[0] > start[0]:
cells = [x for x in cells if x[0] >= start[0]]
elif end[0] < start[0]:
cells = [x for x in cells if x[0] <= start[0]]
if end[1] > start[1]:
cells = [x for x in cells if x[1] >= start[1]]
elif end[1] < start[1]:
cells = [x for x in cells if x[1] <= start[1]]
return cells
def right_intersection(point, line):
"""
Determine the point at which a point is closest to a line
A line through that point would intersect at a right angle.
"""
if line.start[1] == line.end[1]: # line is horizontal (same y values)
return (point[0], line.start[1])
elif line.start[0] == line.end[0]: # line is vertical (same x values)
return (line.start[0], point[1])
m = (line.end[1] - line.start[1]) / (line.end[0] - line.start[0]) # slope
b = line.start[1] - m * line.start[0] # y-intercept
c = point[1] + point[0] / m # y-intercept of intersecting line
x = m * (c - b) / (m ** 2 + 1) # x-coord of intersection
y = m * x + b # y-coord of intersection
return (x, y)
def point_distance(start, end, squared=False):
"""
Calculate distance between two points using Pythagorean theorem.
"""
d_squared = (end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
if squared:
return d_squared
else:
return math.sqrt(d_squared)
def discretize_line(start, end):
"""
Turn start and end points (which are integer (x, y) tuples) into
a list of integer (x, y) points forming a line.
"""
max_length = abs(end[1] - start[1]) + abs(end[0] - start[0])
Line = collections.namedtuple('Line', 'start, end')
line = Line(start, end)
print(line)
results = [start]
seen = set()
while start != end:
neighbours = get_neighbours(start)
neighbours = restrict_quadrants(neighbours, start, end)
print('\nnext round')
print(neighbours)
next_cell = None
min_distance = float('inf')
for cell in neighbours:
if cell in seen: # Don't go backwards
continue
intersection = right_intersection(cell, line)
distance = point_distance(cell, intersection)
print(cell, distance)
if distance < min_distance:
min_distance = distance
next_cell = cell
results.append(next_cell)
if len(results) > max_length: # Failed!
return None
seen.add(next_cell)
start = next_cell
return results
| from __future__ import division
import collections
import itertools
import math
Spoke = collections.namedtuple('Spoke', 'start, end')
def get_neighbours(cell, include_self=False):
"""
Get 8 neighbouring cell coords to a start cell.
If `include_self` is True, returns the current (center) cell as well.
"""
offsets = list(itertools.product([0, 1, -1], repeat=2))
if not include_self:
del offsets[offsets.index((0, 0))] # Don't include start cell
return [(cell[0] + dx, cell[1] + dy) for dx, dy in offsets]
def restrict_quadrants(neighbours, start, end):
if end[0] > start[0]:
cells = [x for x in neighbours if x[0] >= start[0]]
if end[1] > start[1]:
cells = [x for x in neighbours if x[1] >= start[1]]
return cells
def right_intersection(point, line):
"""
Determine the point at which a point is closest to a line
A line through that point would intersect at a right angle.
"""
if line.start[1] == line.end[1]: # line is horizontal (same y values)
return (point[0], line.start[1])
elif line.start[0] == line.end[0]: # line is vertical (same x values)
return (line.start[0], point[1])
m = (line.end[1] - line.start[1]) / (line.end[0] - line.start[0]) # slope
b = line.start[1] - m * line.start[0] # y-intercept
c = point[1] + point[0] / m # y-intercept of intersecting line
x = m * (c - b) / (m ** 2 + 1) # x-coord of intersection
y = m * x + b # y-coord of intersection
return (x, y)
def point_distance(start, end, squared=False):
"""
Calculate distance between two points using Pythagorean theorem.
"""
d_squared = (end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
if squared:
return d_squared
else:
return math.sqrt(d_squared)
def discretize_line(start, end):
"""
Turn start and end points (which are integer (x, y) tuples) into
a list of integer (x, y) points forming a line.
"""
max_length = abs(end[1] - start[1]) + abs(end[0] - start[0])
Line = collections.namedtuple('Line', 'start, end')
line = Line(start, end)
print(line)
results = [start]
seen = set()
while start != end:
neighbours = get_neighbours(start)
neighbours = restrict_quadrants(neighbours, start, end)
print('\nnext round')
print(neighbours)
next_cell = None
min_distance = float('inf')
for cell in neighbours:
if cell in seen: # Don't go backwards
continue
intersection = right_intersection(cell, line)
distance = point_distance(cell, intersection)
print(cell, distance)
if distance < min_distance:
min_distance = distance
next_cell = cell
results.append(next_cell)
if len(results) > max_length: # Failed!
return None
seen.add(next_cell)
start = next_cell
return results
| Python | 0.000001 |
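right_intersection implements point-to-line projection: slope m and intercept b describe the line, c is the intercept of the perpendicular through the point, and solving the two equations gives the foot of the perpendicular. A quick worked check against the functions above:

import collections

Line = collections.namedtuple('Line', 'start, end')

# Project (0, 2) onto the 45-degree line y = x:
# m = 1, b = 0, c = 2 + 0/1 = 2, so x = 1*(2 - 0)/(1**2 + 1) = 1 and y = 1
assert right_intersection((0, 2), Line((0, 0), (2, 2))) == (1, 1)

# The point sits sqrt(2) away from its projection
assert abs(point_distance((0, 2), (1, 1)) - 2 ** 0.5) < 1e-9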
1bf3e893e45e0dc16e2e820f5f073a63600217c3 | Fix errors in PeriodicFilter | robotpy_ext/misc/periodic_filter.py | robotpy_ext/misc/periodic_filter.py | import logging
import time
class PeriodicFilter:
"""
Periodic Filter to help keep down clutter in the console.
Simply add this filter to your logger and the logger will
only print periodically.
The logger will always print logging levels of WARNING or higher
"""
def __init__(self, period, bypassLevel=logging.WARN):
'''
:param period: Wait period (in seconds) between logs
:param bypassLevel: Lowest logging level that the filter should ignore
'''
self._period = period
self._loggingLoop = True
self._last_log = -period
self._bypassLevel = bypassLevel
def filter(self, record):
"""Performs filtering action for logger"""
self._refresh_logger()
return self._loggingLoop or record.levelno >= self._bypassLevel
def _refresh_logger(self):
"""Determine if the log wait period has passed"""
now = time.monotonic()
self._loggingLoop = False
if now - self._last_log > self._period:
self._loggingLoop = True
self._last_log = now
| import logging
import wpilib
class PeriodicFilter:
"""
Periodic Filter to help keep down clutter in the console.
Simply add this filter to your logger and the logger will
only print periodically.
The logger will always print logging levels of WARNING or higher
"""
def __init__(self, period, bypassLevel=logging.WARN):
'''
:param period: Wait period (in seconds) between logs
:param bypassLevel: Lowest logging level that the filter should ignore
'''
self.period = period
self.loggingLoop = True
self._last_log = -period
self.bypassLevel = bypassLevel
def filter(self, record):
"""Performs filtering action for logger"""
self._refresh_logger()
return self.parent.loggingLoop or record.levelno >= self.bypassLevel
def _refresh_logger(self):
"""Determine if the log wait period has passed"""
now = wpilib.Timer.getFPGATimestamp()
self.loggingLoop = False
if now - self.__last_log > self.logging_interval:
self.loggingLoop = True
self.__last_log = now
| Python | 0.000119 |
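Anything with a filter(record) method can be attached to a stdlib logger, so the corrected PeriodicFilter above is used by registering it directly. Usage sketch (logger name is illustrative):

import logging

logger = logging.getLogger('drivetrain')
logger.addFilter(PeriodicFilter(period=5.0))  # pass at most one sub-WARNING burst per 5 s

logger.info('pose update')       # dropped between periods
logger.warning('motor stalled')  # always passes, >= bypassLevel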
881b5c56e89fb2fdb7d4af3a9ec5c5044a25b878 | declare dummy functions | ansible/lib/modules/rally/library/test.py | ansible/lib/modules/rally/library/test.py | from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: rally
short_description: Executes rally commands
'''
def main():
fields = {
"scenario_file" : {"required": True, "type": "str"},
"scenario_args" : {"required" : False, "type": "str"},
}
commands = {'create_db', 'create_deployment', 'check_deployment', 'start_task' , 'task_report' }
module = AnsibleModule(argument_spec={})
response = {"hello": "world"}
module.exit_json(changed=False, meta=response)
def create_db():
    pass
def create_deployment():
    pass
def check_deployment():
    pass
def start_task():
    pass
def task_report():
    pass
if __name__ == '__main__':
# CALL Rally loader
# TODO
main()
| from ansible.module_utils.basic import *
DOCUMENTATION = '''
---
module: rally
short_description: Executes rally commands
'''
def main():
fields = {
"scenario_file" : {"required": True, "type": "str"},
"scenario_args" : {"required" : False, "type": "str"},
}
commands = {'create_db', 'create_deployment', 'check_deployment', 'start_task' }
module = AnsibleModule(argument_spec={})
response = {"hello": "world"}
module.exit_json(changed=False, meta=response)
if __name__ == '__main__':
main()
| Python | 0.000034 |
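Note that main() above builds a fields dict and a commands set but still constructs AnsibleModule(argument_spec={}), so the declared options are never parsed or validated. A sketch of the wiring Ansible expects:

def main():
    fields = {
        "scenario_file": {"required": True, "type": "str"},
        "scenario_args": {"required": False, "type": "str"},
    }
    module = AnsibleModule(argument_spec=fields)
    scenario_file = module.params["scenario_file"]  # validated task input
    module.exit_json(changed=False, meta={"scenario_file": scenario_file})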
27330e69226f36b49f5d5eca5a67af29ee8d679b | Normalize the weasyl link. | conbadge.py | conbadge.py | from fractions import Fraction
from cStringIO import StringIO
from PIL import Image, ImageDraw, ImageFont
import qrcode
import requests
museo = ImageFont.truetype('Museo500-Regular.otf', 424)
badge_back = Image.open('badge-back.png')
logo_stamp = Image.open('logo-stamp.png')
qr_size = 975, 975
qr_offset = 75, 75
name_color = 6, 155, 192
text_bounds = 735, 125
text_offset = 365, 1155
avatar_bounds = 282, 282
avatar_offset = 49, 1131
class AvatarFetchError(Exception):
pass
def draw_text(text, color, fit_size):
text_size = museo.getsize(text)
img = Image.new('RGBA', text_size, color + (0,))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, color + (255,), font=museo)
width, height = img.size
fit_width, fit_height = fit_size
width_ratio = Fraction(width, fit_width)
height_ratio = Fraction(height, fit_height)
if width_ratio > height_ratio:
new_size = fit_width, int(height / width_ratio)
else:
new_size = int(width / height_ratio), fit_height
return img.resize(new_size, Image.ANTIALIAS)
def center(size, fit_size, offset):
w, h = size
fw, fh = fit_size
x, y = offset
return x + (fw - w) // 2, y + (fh - h) // 2
logo_pos = center(logo_stamp.size, qr_size, qr_offset)
def weasyl_sysname(target):
return ''.join(i for i in target if i.isalnum()).lower()
def weasyl_badge(username, avatar_resizing=Image.ANTIALIAS):
r = requests.get(
'https://www.weasyl.com/api/useravatar', params={'username': username})
resp = r.json()
if resp['error']['code'] != 0:
raise AvatarFetchError(resp['error'])
back = badge_back.copy()
qr = qrcode.QRCode(
error_correction=qrcode.constants.ERROR_CORRECT_H, border=1)
qr.add_data('https://weasyl.com/~%s' % (weasyl_sysname(username),))
qr_mask = qr.make_image().resize(qr_size)
back.paste((255, 255, 255, 255), qr_offset, qr_mask)
back.paste(logo_stamp, logo_pos, logo_stamp)
text = draw_text(username, name_color, text_bounds)
text_pos = center(text.size, text_bounds, text_offset)
back.paste(text, text_pos, text)
avatar = Image.open(StringIO(requests.get(resp['avatar']).content))
avatar = avatar.resize(avatar_bounds, avatar_resizing).convert('RGBA')
back.paste(avatar, avatar_offset, avatar)
return back
| from fractions import Fraction
from cStringIO import StringIO
from PIL import Image, ImageDraw, ImageFont
import qrcode
import requests
museo = ImageFont.truetype('Museo500-Regular.otf', 424)
badge_back = Image.open('badge-back.png')
logo_stamp = Image.open('logo-stamp.png')
qr_size = 975, 975
qr_offset = 75, 75
name_color = 6, 155, 192
text_bounds = 735, 125
text_offset = 365, 1155
avatar_bounds = 282, 282
avatar_offset = 49, 1131
class AvatarFetchError(Exception):
pass
def draw_text(text, color, fit_size):
text_size = museo.getsize(text)
img = Image.new('RGBA', text_size, color + (0,))
draw = ImageDraw.Draw(img)
draw.text((0, 0), text, color + (255,), font=museo)
width, height = img.size
fit_width, fit_height = fit_size
width_ratio = Fraction(width, fit_width)
height_ratio = Fraction(height, fit_height)
if width_ratio > height_ratio:
new_size = fit_width, int(height / width_ratio)
else:
new_size = int(width / height_ratio), fit_height
return img.resize(new_size, Image.ANTIALIAS)
def center(size, fit_size, offset):
w, h = size
fw, fh = fit_size
x, y = offset
return x + (fw - w) // 2, y + (fh - h) // 2
logo_pos = center(logo_stamp.size, qr_size, qr_offset)
def weasyl_badge(username, avatar_resizing=Image.ANTIALIAS):
r = requests.get(
'https://www.weasyl.com/api/useravatar', params={'username': username})
resp = r.json()
if resp['error']['code'] != 0:
raise AvatarFetchError(resp['error'])
back = badge_back.copy()
qr = qrcode.QRCode(
error_correction=qrcode.constants.ERROR_CORRECT_H, border=1)
qr.add_data('https://weasyl.com/~%s' % (username,))
qr_mask = qr.make_image().resize(qr_size)
back.paste((255, 255, 255, 255), qr_offset, qr_mask)
back.paste(logo_stamp, logo_pos, logo_stamp)
text = draw_text(username, name_color, text_bounds)
text_pos = center(text.size, text_bounds, text_offset)
back.paste(text, text_pos, text)
avatar = Image.open(StringIO(requests.get(resp['avatar']).content))
avatar = avatar.resize(avatar_bounds, avatar_resizing).convert('RGBA')
back.paste(avatar, avatar_offset, avatar)
return back
| Python | 0.000002 |
90678692ec85ec90d454b1a3b255dae834bb24ba | trim space | tests/mocks/postgres.py | tests/mocks/postgres.py | from psycopg2.extensions import connection, cursor
class MockConnection(connection):
def __init__(self, *args, **kwargs):
self._cursor = MockCursor()
def cursor(self, *args, **kwargs):
return self._cursor
class MockCursor(cursor):
def __init__(self, *args, **kwargs):
self.queries = []
def execute(self, query, *args, **kwargs):
self.queries.append(query)
def fetchall(self, *args, **kwargs):
return []
def fetchone(self, *args, **kwargs):
return None
__all__ = ["MockConnection"] |
from psycopg2.extensions import connection, cursor
class MockConnection(connection):
def __init__(self, *args, **kwargs):
self._cursor = MockCursor()
def cursor(self, *args, **kwargs):
return self._cursor
class MockCursor(cursor):
def __init__(self, *args, **kwargs):
self.queries = []
def execute(self, query, *args, **kwargs):
self.queries.append(query)
def fetchall(self, *args, **kwargs):
return []
def fetchone(self, *args, **kwargs):
return None
__all__ = ["MockConnection"] | Python | 0.000001 |
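A sketch of how a test would drive these mocks, assuming psycopg2 tolerates bare construction here, which is what the overridden __init__ implies:
conn = MockConnection()
cur = conn.cursor()
cur.execute("SELECT 1")
assert cur.queries == ["SELECT 1"]        # every statement is recorded
assert cur.fetchall() == [] and cur.fetchone() is None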
4c5ebbabcf54b1f23459da7ddf85adf5e5de22d8 | Update add-lore.py to serve legacy lorebot needs | add-lore.py | add-lore.py | #!/usr/bin/env python3
import argparse
import datetime
from peewee import peewee
db = peewee.SqliteDatabase(None)
class BaseModel(peewee.Model):
class Meta:
database = db
class Lore(BaseModel):
time = peewee.DateTimeField(null=True, index=True)
author = peewee.CharField(null=True, index=True)
lore = peewee.CharField()
rating = peewee.FloatField()
def __str__(self):
return "[%s] [%s]\n%s" % (self.time, self.author, self.lore)
def main():
lore_file = 'lore.db'
db.init(lore_file)
parser = argparse.ArgumentParser()
parser.add_argument('lore', nargs='+', help="blob of lore to save")
args = parser.parse_args()
lore = ' '.join(args.lore)
# add-lore "[mike_bloomfield]: how do i use the lorebot "
t = datetime.datetime.now()
# Try to parse plain loreblob, extracting author from []
author = lore.split(': ')[0].split('[')[1].split(']')[0]
loretxt = ': '.join(lore.split(': ')[1:])
db.begin()
# Check to see if lore already exists (based on author/lore match)
matches = Lore.select().where(Lore.author == author and Lore.lore == lore).count()
if matches == 0:
l = Lore.create(time=t, author=author, lore=loretxt, rating=0)
print(l)
else:
print("Lore already exists...")
db.commit()
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
import argparse
import datetime
from peewee import peewee
db = peewee.SqliteDatabase(None)
class BaseModel(peewee.Model):
class Meta:
database = db
class Lore(BaseModel):
time = peewee.DateTimeField(null=True, index=True)
author = peewee.CharField(null=True, index=True)
lore = peewee.CharField()
rating = peewee.FloatField()
def main():
lore_file = 'lore.db'
db.init(lore_file)
parser = argparse.ArgumentParser()
parser.add_argument('author')
parser.add_argument('lore', nargs='+', help="blob of lore to save")
args = parser.parse_args()
print(args.lore)
t = datetime.datetime.now()
# Try to parse plain loreblob, extracting author from []
author = args.author
lore = ' '.join(args.lore)
print(author, lore)
db.begin()
# Check to see if lore already exists (based on author/lore match)
matches = Lore.select().where(Lore.author == author and Lore.lore == lore).count()
if matches == 0:
Lore.create(time=t, author=author, lore=lore, rating=0)
db.commit()
if __name__ == "__main__":
main()
| Python | 0 |
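The author/text split used above, checked standalone (pure Python, no database involved):
lore = "[mike_bloomfield]: how do i use the lorebot"
author = lore.split(': ')[0].split('[')[1].split(']')[0]
loretxt = ': '.join(lore.split(': ')[1:])
assert author == 'mike_bloomfield'
assert loretxt == 'how do i use the lorebot'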
5f21305d1736322064aa9f5e503965b102a6c086 | Add Ending class | hairball/plugins/neu.py | hairball/plugins/neu.py | """This module provides plugins for NEU metrics."""
import kurt
from hairball.plugins import HairballPlugin
from PIL import Image
import os
class Variables(HairballPlugin):
"""Plugin that counts the number of variables in a project."""
def __init__(self):
super(Variables, self).__init__()
self.total = 0
def finalize(self):
"""Output the number of variables in the project."""
print("Number of variables: %i" % self.total)
def analyze(self, scratch):
"""Run and return the results of the Variables plugin."""
self.total = len(scratch.variables)
for sprite in scratch.sprites:
self.total += len(sprite.variables)
class Lists(HairballPlugin):
"""Plugin that counts the number of lists in a project."""
def __init__(self):
super(Lists, self).__init__()
self.total = 0
def finalize(self):
"""Output the number of lists in the project."""
print("Number of lists: %i" % self.total)
def analyze(self, scratch):
"""Run and return the results of the Lists plugin."""
self.total = len(scratch.lists)
for sprite in scratch.sprites:
self.total += len(sprite.lists)
class BlockCounts(HairballPlugin):
"""Plugin that keeps track of the number of blocks in a project."""
def __init__(self):
super(BlockCounts, self).__init__()
self.blocks = 0
def finalize(self):
"""Output the aggregate block count results."""
print("Number of blocks %i" % self.blocks)
def analyze(self, scratch):
"""Run and return the results from the BlockCounts plugin."""
for script in self.iter_scripts(scratch):
for block in self.iter_blocks(script.blocks):
self.blocks += 1
class Colors(HairballPlugin):
"""Plugin that keeps track of the colors of the stage images."""
def __init__(self):
        self.colors = {}
def finalize(self):
"""Output the aggregate block count results."""
print self.colors
def compute_average_image_color(self, img):
"""
Compute the most frequent color in img.
Code adapted from
http://blog.zeevgilovitz.com/detecting-dominant-colours-in-python/
"""
image = Image.open(img)
w, h = image.size
pixels = image.getcolors(w * h)
most_frequent_pixel = pixels[0]
for count, colour in pixels:
if count > most_frequent_pixel[0]:
most_frequent_pixel = (count, colour)
rgb = []
for i in range(3):
            rgb.append(most_frequent_pixel[1][i])
        trgb = tuple(rgb)
        trgb = '#%02x%02x%02x' % trgb  # Transform rgb to Hex color (HTML)
return trgb
def analyze(self, scratch):
"""Run and return the results from the BlockCounts plugin."""
#ToDo: get the images from stage and characters
class Ending(HairballPlugin):
"""Plugin that checks if the project seems to end."""
def __init__(self):
super(Ending, self).__init__()
self.total = 0
def finalize(self):
"""Output whether the project seems to end or not."""
if self.total > 0:
print "The game seems to end at some point"
else:
print "The game seems to not ever end"
def analyze(self, scratch):
"""Run and return the results of the Ending plugin."""
for script in self.iter_scripts(scratch):
for name, _, _ in self.iter_blocks(script.blocks):
if name == "stop %s":
self.total += 1
| """This module provides plugins for NEU metrics."""
import kurt
from hairball.plugins import HairballPlugin
from PIL import Image
import os
class Variables(HairballPlugin):
"""Plugin that counts the number of variables in a project."""
def __init__(self):
super(Variables, self).__init__()
self.total = 0
def finalize(self):
"""Output the number of variables in the project."""
print("Number of variables: %i" % self.total)
def analyze(self, scratch):
"""Run and return the results of the Variables plugin."""
self.total = len(scratch.variables)
for x in scratch.sprites:
self.total += len(x.variables)
class Lists(HairballPlugin):
"""Plugin that counts the number of lists in a project."""
def __init__(self):
super(Lists, self).__init__()
self.total = 0
def finalize(self):
"""Output the number of lists in the project."""
print("Number of lists: %i" % self.total)
def analyze(self, scratch):
"""Run and return the results of the Lists plugin."""
self.total = len(scratch.lists)
for x in scratch.sprites:
self.total += len(x.lists)
class BlockCounts(HairballPlugin):
"""Plugin that keeps track of the number of blocks in a project."""
def __init__(self):
super(BlockCounts, self).__init__()
self.blocks = 0
def finalize(self):
"""Output the aggregate block count results."""
print("Number of blocks %i" % self.blocks)
def analyze(self, scratch):
"""Run and return the results from the BlockCounts plugin."""
for script in self.iter_scripts(scratch):
for b in self.iter_blocks(script.blocks):
self.blocks += 1
class Colors(HairballPlugin):
"""Plugin that keeps track of the colors of the stage images."""
def __init__(self):
        self.colors = {}
def finalize(self):
"""Output the aggregate block count results."""
print self.colors
def compute_average_image_color(self, img):
"""
Compute the most frequent color in img.
Code adapted from
http://blog.zeevgilovitz.com/detecting-dominant-colours-in-python/
"""
image = Image.open(img)
w, h = image.size
pixels = image.getcolors(w * h)
most_frequent_pixel = pixels[0]
for count, colour in pixels:
if count > most_frequent_pixel[0]:
most_frequent_pixel = (count, colour)
rgb = []
for i in range(3):
            rgb.append(most_frequent_pixel[1][i])
        trgb = tuple(rgb)
        trgb = '#%02x%02x%02x' % trgb  # Transform rgb to Hex color (HTML)
return trgb
def analyze(self, scratch):
"""Run and return the results from the BlockCounts plugin."""
#ToDo: get the images from stage and characters | Python | 0.000011 |
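A quick check of the dominant-color helper with a synthetic image; a sketch that needs only Pillow, not a Scratch project (the temp path is illustrative):
from PIL import Image
Image.new('RGB', (4, 4), (255, 0, 0)).save('/tmp/solid_red.png')
print(Colors().compute_average_image_color('/tmp/solid_red.png'))  # '#ff0000'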
6ed282bb2da04790e6e399faad4d2ba8dfc214c4 | add v0.20210330 (#28111) | var/spack/repos/builtin/packages/ccls/package.py | var/spack/repos/builtin/packages/ccls/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ccls(CMakePackage):
"""C/C++ language server"""
homepage = "https://github.com/MaskRay/ccls"
git = "https://github.com/MaskRay/ccls.git"
url = "https://github.com/MaskRay/ccls/archive/0.20201025.tar.gz"
maintainers = ['jacobmerson']
version('0.20210330', sha256='28c228f49dfc0f23cb5d581b7de35792648f32c39f4ca35f68ff8c9cb5ce56c2')
version('0.20201025', sha256='1470797b2c1a466e2d8a069efd807aac6fefdef8a556e1edf2d44f370c949221')
variant('build_type', default='Release', description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
depends_on("cmake@3.8:", type="build")
depends_on('llvm@7:')
depends_on('rapidjson')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ccls(CMakePackage):
"""C/C++ language server"""
homepage = "https://github.com/MaskRay/ccls"
git = "https://github.com/MaskRay/ccls.git"
url = "https://github.com/MaskRay/ccls/archive/0.20201025.tar.gz"
maintainers = ['jacobmerson']
version('0.20201025', sha256='1470797b2c1a466e2d8a069efd807aac6fefdef8a556e1edf2d44f370c949221')
variant('build_type', default='Release', description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
depends_on("cmake@3.8:", type="build")
depends_on('llvm@7:')
depends_on('rapidjson')
| Python | 0 |
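The usual Spack workflow for a recipe like this one; these are standard spack commands, with version-specific behavior untested here:
# spack info ccls
# spack install ccls@0.20210330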
f230e69780823f4ceb48a68015cd5bd4af94cba0 | Add in some settings for email | ml_service_api/aws.py | ml_service_api/aws.py | """
Deployment settings file
"""
from settings import *
import json
DEBUG=False
TIME_BETWEEN_INDEX_REBUILDS = 60 * 30 # seconds
#Tastypie throttle settings
THROTTLE_AT = 100 #Throttle requests after this number in below timeframe
THROTTLE_TIMEFRAME= 60 * 60 #Timeframe in which to throttle N requests, seconds
THROTTLE_EXPIRATION= 24 * 60 * 60 # When to remove throttle entries from cache, seconds
with open(os.path.join(ENV_ROOT,"env.json")) as env_file:
ENV_TOKENS = json.load(env_file)
with open(os.path.join(ENV_ROOT, "auth.json")) as auth_file:
AUTH_TOKENS = json.load(auth_file)
DATABASES = AUTH_TOKENS.get('DATABASES', DATABASES)
CACHES = AUTH_TOKENS.get('CACHES', CACHES)
AWS_ACCESS_KEY_ID = AUTH_TOKENS.get('AWS_ACCESS_KEY_ID', AWS_ACCESS_KEY_ID)
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS.get('AWS_SECRET_ACCESS_KEY', AWS_SECRET_ACCESS_KEY)
USE_S3_TO_STORE_MODELS = ENV_TOKENS.get('USE_S3_TO_STORE_MODELS', USE_S3_TO_STORE_MODELS)
S3_BUCKETNAME = ENV_TOKENS.get('S3_BUCKETNAME', S3_BUCKETNAME)
BROKER_URL = AUTH_TOKENS.get('BROKER_URL', BROKER_URL)
CELERY_RESULT_BACKEND = AUTH_TOKENS.get('CELERY_RESULT_BACKEND', CELERY_RESULT_BACKEND)
ELB_HOSTNAME = ENV_TOKENS.get('ELB_HOSTNAME', None)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
AWS_SES_REGION_NAME = ENV_TOKENS.get('AWS_SES_REGION_NAME', None)
if AWS_SES_REGION_NAME is not None:
AWS_SES_REGION_ENDPOINT = 'email.{0}.amazonaws.com'.format(AWS_SES_REGION_NAME)
if ELB_HOSTNAME is not None:
ALLOWED_HOSTS += [ELB_HOSTNAME] | """
Deployment settings file
"""
from settings import *
import json
DEBUG=False
TIME_BETWEEN_INDEX_REBUILDS = 60 * 30 # seconds
#Tastypie throttle settings
THROTTLE_AT = 100 #Throttle requests after this number in below timeframe
THROTTLE_TIMEFRAME= 60 * 60 #Timeframe in which to throttle N requests, seconds
THROTTLE_EXPIRATION= 24 * 60 * 60 # When to remove throttle entries from cache, seconds
with open(os.path.join(ENV_ROOT,"env.json")) as env_file:
ENV_TOKENS = json.load(env_file)
with open(os.path.join(ENV_ROOT, "auth.json")) as auth_file:
AUTH_TOKENS = json.load(auth_file)
DATABASES = AUTH_TOKENS.get('DATABASES', DATABASES)
CACHES = AUTH_TOKENS.get('CACHES', CACHES)
AWS_ACCESS_KEY_ID = AUTH_TOKENS.get('AWS_ACCESS_KEY_ID', AWS_ACCESS_KEY_ID)
AWS_SECRET_ACCESS_KEY = AUTH_TOKENS.get('AWS_SECRET_ACCESS_KEY', AWS_SECRET_ACCESS_KEY)
USE_S3_TO_STORE_MODELS = ENV_TOKENS.get('USE_S3_TO_STORE_MODELS', USE_S3_TO_STORE_MODELS)
S3_BUCKETNAME = ENV_TOKENS.get('S3_BUCKETNAME', S3_BUCKETNAME)
BROKER_URL = AUTH_TOKENS.get('BROKER_URL', BROKER_URL)
CELERY_RESULT_BACKEND = AUTH_TOKENS.get('CELERY_RESULT_BACKEND', CELERY_RESULT_BACKEND)
ELB_HOSTNAME = ENV_TOKENS.get('ELB_HOSTNAME', None)
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', EMAIL_BACKEND)
if ELB_HOSTNAME is not None:
ALLOWED_HOSTS += [ELB_HOSTNAME] | Python | 0.000001 |
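The lookup-with-fallback pattern the new email settings follow, in isolation; a sketch with inline JSON standing in for env.json, and the fallback backend string is illustrative:
import json
ENV_TOKENS = json.loads('{"AWS_SES_REGION_NAME": "us-east-1"}')
EMAIL_BACKEND = ENV_TOKENS.get('EMAIL_BACKEND', 'django.core.mail.backends.smtp.EmailBackend')
AWS_SES_REGION_NAME = ENV_TOKENS.get('AWS_SES_REGION_NAME', None)
if AWS_SES_REGION_NAME is not None:
    AWS_SES_REGION_ENDPOINT = 'email.{0}.amazonaws.com'.format(AWS_SES_REGION_NAME)
assert AWS_SES_REGION_ENDPOINT == 'email.us-east-1.amazonaws.com'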
e4fff83666ee6e3ce63145f84a550f6fb361096d | Fix Enum hack situation | nycodex/db.py | nycodex/db.py | import enum
import os
import typing
import sqlalchemy
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() # type: typing.Any
engine = sqlalchemy.create_engine(os.environ["DATABASE_URI"])
Session = sqlalchemy.orm.sessionmaker(bind=engine)
@enum.unique
class DomainCategory(enum.Enum):
BIGAPPS = "NYC BigApps"
BUSINESS = "Business"
CITY_GOVERNMENT = "City Government"
EDUCATION = "Education"
ENVIRONMENT = "Environment"
HEALTH = "Health"
HOUSING_DEVELOPMENT = "Housing & Development"
PUBLIC_SAFETY = "Public Safety"
RECREATION = "Recreation"
SOCIAL_SERVICES = "Social Services"
TRANSPORTATION = "Transportation"
@enum.unique
class AssetType(enum.Enum):
CALENDAR = 'calendar'
CHART = 'chart'
DATALENS = 'datalens'
DATASET = 'dataset'
FILE = 'file'
FILTER = 'filter'
HREF = 'href'
MAP = 'map'
class DbMixin:
__table__: sqlalchemy.Table
@classmethod
def upsert(cls, conn: sqlalchemy.engine.base.Connection,
instances: typing.Iterable["DbMixin"]) -> None:
keys = cls.__table__.c.keys()
for instance in instances:
data = {key: getattr(instance, key) for key in keys}
insert = (postgresql.insert(cls.__table__).values(**data)
.on_conflict_do_update(
index_elements=[cls.__table__.c.id],
set_={k: data[k]
for k in data if k != 'id'}))
conn.execute(insert)
def __eq__(self, other):
keys = self.__table__.c.keys()
return ({key: getattr(self, key) for key in keys}
== {key: getattr(other, key) for key in keys}) # yapf: disable
def sql_enum(enum: typing.Type[enum.Enum]):
return type(enum.__name__, (), {
"__members__": {v.value: v for v in enum.__members__.values()}
}) # yapf: disable
class Dataset(Base, DbMixin):
__tablename__ = "dataset"
id = sqlalchemy.Column(sqlalchemy.CHAR(9), primary_key=True)
name = sqlalchemy.Column(sqlalchemy.VARCHAR, nullable=False)
description = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)
is_official = sqlalchemy.Column(sqlalchemy.BOOLEAN, nullable=False)
owner_id = sqlalchemy.Column(
sqlalchemy.CHAR(9), sqlalchemy.ForeignKey("owner.id"))
updated_at = sqlalchemy.Column(
sqlalchemy.TIMESTAMP(timezone=True), nullable=False)
scraped_at = sqlalchemy.Column(
sqlalchemy.TIMESTAMP(timezone=True), nullable=True)
domain_category = sqlalchemy.Column(
postgresql.ENUM(sql_enum(DomainCategory), name="DomainCategory"),
nullable=True)
asset_type = sqlalchemy.Column(
postgresql.ENUM(sql_enum(AssetType), name="AssetType"), nullable=True)
class Owner(Base, DbMixin):
__tablename__ = "owner"
id = sqlalchemy.Column(sqlalchemy.CHAR(9), primary_key=True)
name = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)
| from enum import Enum
import os
import typing
import sqlalchemy
from sqlalchemy.dialects import postgresql
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base() # type: typing.Any
engine = sqlalchemy.create_engine(os.environ["DATABASE_URI"])
Session = sqlalchemy.orm.sessionmaker(bind=engine)
class DomainCategory(Enum):
BIGAPPS = "NYC BigApps"
BUSINESS = "Business"
CITY_GOVERNMENT = "City Government"
EDUCATION = "Education"
ENVIRONMENT = "Environment"
HEALTH = "Health"
HOUSING_DEVELOPMENT = "Housing & Development"
PUBLIC_SAFETY = "Public Safety"
RECREATION = "Recreation"
SOCIAL_SERVICES = "Social Services"
TRANSPORTATION = "Transportation"
class AssetType(Enum):
CALENDAR = 'calendar'
CHART = 'chart'
DATALENS = 'datalens'
DATASET = 'dataset'
FILE = 'file'
FILTER = 'filter'
HREF = 'href'
MAP = 'map'
class DbMixin():
__table__: sqlalchemy.Table
@classmethod
def upsert(cls, conn: sqlalchemy.engine.base.Connection,
instances: typing.Iterable["DbMixin"]) -> None:
keys = cls.__table__.c.keys()
for instance in instances:
data = {key: getattr(instance, key) for key in keys}
insert = (postgresql.insert(cls.__table__).values(**data)
.on_conflict_do_update(
index_elements=[cls.__table__.c.id],
set_={k: data[k]
for k in data if k != 'id'}))
conn.execute(insert)
def __eq__(self, other):
keys = self.__table__.c.keys()
return ({key: getattr(self, key)
for key in keys} == {
key: getattr(other, key)
for key in keys
})
class Dataset(Base, DbMixin):
__tablename__ = "dataset"
id = sqlalchemy.Column(sqlalchemy.CHAR(9), primary_key=True)
name = sqlalchemy.Column(sqlalchemy.VARCHAR, nullable=False)
description = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)
is_official = sqlalchemy.Column(sqlalchemy.BOOLEAN, nullable=False)
owner_id = sqlalchemy.Column(
sqlalchemy.CHAR(9), sqlalchemy.ForeignKey("owner.id"))
updated_at = sqlalchemy.Column(
sqlalchemy.TIMESTAMP(timezone=True), nullable=False)
scraped_at = sqlalchemy.Column(
sqlalchemy.TIMESTAMP(timezone=True), nullable=True)
domain_category = sqlalchemy.Column(
postgresql.ENUM(
* [v.value for v in DomainCategory.__members__.values()],
name="DomainCategory"),
nullable=True)
asset_type = sqlalchemy.Column(
postgresql.ENUM(
* [v.value for v in AssetType.__members__.values()],
name="AssetType"),
nullable=True)
class Owner(Base, DbMixin):
__tablename__ = "owner"
id = sqlalchemy.Column(sqlalchemy.CHAR(9), primary_key=True)
name = sqlalchemy.Column(sqlalchemy.TEXT, nullable=False)
| Python | 0.000013 |
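How DbMixin.upsert is driven; a sketch assuming a live Postgres behind DATABASE_URI, and the owner row is made up for illustration:
with engine.begin() as conn:
    Owner.upsert(conn, [Owner(id='abcd-1234', name='Example Agency')])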
6ac28c1daa0173ae5baa66c9cb020e9c673973ff | Add info for lftp@4.8.1 (#5452) | var/spack/repos/builtin/packages/lftp/package.py | var/spack/repos/builtin/packages/lftp/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Lftp(AutotoolsPackage):
"""LFTP is a sophisticated file transfer program supporting a number
of network protocols (ftp, http, sftp, fish, torrent)."""
homepage = "http://lftp.yar.ru/"
url = "http://lftp.yar.ru/ftp/lftp-4.7.7.tar.gz"
version('4.8.1', '419b27c016d968a0226b2e5df1454c22')
version('4.7.7', 'ddc71b3b11a1af465e829075ae14b3ff')
depends_on('expat')
depends_on('libiconv')
depends_on('ncurses')
depends_on('openssl')
depends_on('readline')
depends_on('zlib')
def configure_args(self):
return [
'--with-expat={0}'.format(self.spec['expat'].prefix),
'--with-libiconv={0}'.format(self.spec['libiconv'].prefix),
'--with-openssl={0}'.format(self.spec['openssl'].prefix),
'--with-readline={0}'.format(self.spec['readline'].prefix),
'--with-zlib={0}'.format(self.spec['zlib'].prefix),
'--disable-dependency-tracking',
]
| ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Lftp(AutotoolsPackage):
"""LFTP is a sophisticated file transfer program supporting a number
of network protocols (ftp, http, sftp, fish, torrent)."""
homepage = "http://lftp.yar.ru/"
url = "http://lftp.yar.ru/ftp/lftp-4.7.7.tar.gz"
version('4.7.7', 'ddc71b3b11a1af465e829075ae14b3ff')
depends_on('expat')
depends_on('libiconv')
depends_on('ncurses')
depends_on('openssl')
depends_on('readline')
depends_on('zlib')
def configure_args(self):
return [
'--with-expat={0}'.format(self.spec['expat'].prefix),
'--with-libiconv={0}'.format(self.spec['libiconv'].prefix),
'--with-openssl={0}'.format(self.spec['openssl'].prefix),
'--with-readline={0}'.format(self.spec['readline'].prefix),
'--with-zlib={0}'.format(self.spec['zlib'].prefix),
'--disable-dependency-tracking',
]
| Python | 0 |
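Standard Spack commands for exercising the updated recipe; the 4.8.1 version comes from the record itself:
# spack checksum lftp 4.8.1
# spack install lftp@4.8.1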
e2be8a486c9d13f98d9f14ae7b0cddf8225cf1b3 | Add boolswitch test | test/test_apv_rename.py | test/test_apv_rename.py | """
Copyright (c) 2017, Michael Sonntag (sonntag@bio.lmu.de)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License. See
LICENSE file in the root of the project.
"""
import os
import shutil
import tempfile
import unittest
import uuid
from bren.bulk_rename import BulkRename
class RenameTest(unittest.TestCase):
def setUp(self):
dir_name = "bren_%s" % str(uuid.uuid1())
self.tmpdir = os.path.join(tempfile.gettempdir(), dir_name)
if not os.path.isdir(self.tmpdir):
os.makedirs(self.tmpdir)
for i in range(0, 3):
tmp_file = "tmpfile_%s.jpg" % i
open(os.path.join(self.tmpdir, tmp_file), 'a').close()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_tmp_files(self):
self.assertEqual(len(os.listdir(self.tmpdir)), 3)
def test_bool_switch(self):
self.assertEqual(BulkRename._bool_switch(1), True)
self.assertEqual(BulkRename._bool_switch(2), False)
| """
Copyright (c) 2017, Michael Sonntag (sonntag@bio.lmu.de)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted under the terms of the BSD License. See
LICENSE file in the root of the project.
"""
import os
import shutil
import tempfile
import unittest
import uuid
class RenameTest(unittest.TestCase):
def setUp(self):
dir_name = "bren_%s" % str(uuid.uuid1())
self.tmpdir = os.path.join(tempfile.gettempdir(), dir_name)
if not os.path.isdir(self.tmpdir):
os.makedirs(self.tmpdir)
for i in range(0, 3):
tmp_file = "tmpfile_%s.jpg" % i
open(os.path.join(self.tmpdir, tmp_file), 'a').close()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_tmp_files(self):
self.assertEqual(len(os.listdir(self.tmpdir)), 3)
| Python | 0.000001 |
59faede78ad5d763c7c9fa1763e3e7cac67c1ca6 | Move Circle inside CxDeriv | cxroots/CxDerivative.py | cxroots/CxDerivative.py | from __future__ import division
import numpy as np
from numpy import inf, pi
import scipy.integrate
import math
def CxDeriv(f, contour=None):
"""
	Compute derivative of an analytic function using Cauchy's Integral Formula for Derivatives
"""
if contour is None:
from cxroots.Contours import Circle
C = lambda z0: Circle(z0, 1e-3)
else:
C = lambda z0: contour
def df(z0, n):
integrand = lambda z: f(z)/(z-z0)**(n+1)
return C(z0).integrate(integrand) * math.factorial(n)/(2j*pi)
return np.vectorize(df)
| from __future__ import division
import numpy as np
from numpy import inf, pi
import scipy.integrate
import math
from cxroots.Contours import Circle, Rectangle
def CxDeriv(f, contour=None):
"""
	Compute derivative of an analytic function using Cauchy's Integral Formula for Derivatives
"""
if contour is None:
C = lambda z0: Circle(z0, 1e-3)
else:
C = lambda z0: contour
def df(z0, n):
integrand = lambda z: f(z)/(z-z0)**(n+1)
return C(z0).integrate(integrand) * math.factorial(n)/(2j*pi)
return np.vectorize(df)
| Python | 0 |
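A numerical sanity check for CxDeriv; a sketch assuming cxroots' Circle contour imports cleanly and exposes the integrate method this code calls:
f = lambda z: z**3
df = CxDeriv(f)
print(df(2.0, 1))  # about 12, since f'(z) = 3z**2
print(df(2.0, 2))  # about 12, since f''(z) = 6z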
2df7947f02fd39e05bf18a89f904e273d17c63ca | add v0.9.29 (#23606) | var/spack/repos/builtin/packages/lmdb/package.py | var/spack/repos/builtin/packages/lmdb/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lmdb(MakefilePackage):
"""Symas LMDB is an extraordinarily fast, memory-efficient database we
developed for the Symas OpenLDAP Project. With memory-mapped files, it
has the read performance of a pure in-memory database while retaining
the persistence of standard disk-based databases."""
homepage = "https://lmdb.tech/"
url = "https://github.com/LMDB/lmdb/archive/LMDB_0.9.21.tar.gz"
version('0.9.29', sha256='22054926b426c66d8f2bc22071365df6e35f3aacf19ad943bc6167d4cae3bebb')
version('0.9.24', sha256='44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26')
version('0.9.22', sha256='f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28')
version('0.9.21', sha256='1187b635a4cc415bb6972bba346121f81edd996e99b8f0816151d4090f90b559')
version('0.9.16', sha256='49d7b40949f2ced9bc8b23ea6a89e75471a1c9126537a8b268c318a00b84322b')
build_directory = 'libraries/liblmdb'
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix), 'install']
@run_after('install')
def install_pkgconfig(self):
mkdirp(self.prefix.lib.pkgconfig)
with open(join_path(self.prefix.lib.pkgconfig, 'lmdb.pc'), 'w') as f:
f.write('prefix={0}\n'.format(self.prefix))
f.write('exec_prefix=${prefix}\n')
f.write('libdir={0}\n'.format(self.prefix.lib))
f.write('includedir={0}\n'.format(self.prefix.include))
f.write('\n')
f.write('Name: LMDB\n')
f.write('Description: Symas LMDB is an extraordinarily fast, '
'memory-efficient database.\n')
f.write('Version: {0}\n'.format(self.spec.version))
f.write('Cflags: -I${includedir}\n')
f.write('Libs: -L${libdir} -llmdb\n')
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Lmdb(MakefilePackage):
"""Symas LMDB is an extraordinarily fast, memory-efficient database we
developed for the Symas OpenLDAP Project. With memory-mapped files, it
has the read performance of a pure in-memory database while retaining
the persistence of standard disk-based databases."""
homepage = "https://lmdb.tech/"
url = "https://github.com/LMDB/lmdb/archive/LMDB_0.9.21.tar.gz"
version('0.9.24', sha256='44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26')
version('0.9.22', sha256='f3927859882eb608868c8c31586bb7eb84562a40a6bf5cc3e13b6b564641ea28')
version('0.9.21', sha256='1187b635a4cc415bb6972bba346121f81edd996e99b8f0816151d4090f90b559')
version('0.9.16', sha256='49d7b40949f2ced9bc8b23ea6a89e75471a1c9126537a8b268c318a00b84322b')
build_directory = 'libraries/liblmdb'
@property
def install_targets(self):
return ['prefix={0}'.format(self.prefix), 'install']
@run_after('install')
def install_pkgconfig(self):
mkdirp(self.prefix.lib.pkgconfig)
with open(join_path(self.prefix.lib.pkgconfig, 'lmdb.pc'), 'w') as f:
f.write('prefix={0}\n'.format(self.prefix))
f.write('exec_prefix=${prefix}\n')
f.write('libdir={0}\n'.format(self.prefix.lib))
f.write('includedir={0}\n'.format(self.prefix.include))
f.write('\n')
f.write('Name: LMDB\n')
f.write('Description: Symas LMDB is an extraordinarily fast, '
'memory-efficient database.\n')
f.write('Version: {0}\n'.format(self.spec.version))
f.write('Cflags: -I${includedir}\n')
f.write('Libs: -L${libdir} -llmdb\n')
| Python | 0 |
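With the generated lmdb.pc in place, pkg-config discovery should work in the usual way (standard pkg-config invocations):
# pkg-config --cflags lmdb
# pkg-config --libs lmdb   # expected: -L<prefix>/lib -llmdb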
7abc503d6aa492f2340ab0b98d1f66892180ba19 | Fix some test error | tests/test_blueprint.py | tests/test_blueprint.py | from wood import Wood
from wood.support import Blueprint
def make_example_blueprint():
b = Blueprint()
b.empty(r"/example","example")
return b
def test_blueprint_can_add_empty_handler():
b = make_example_blueprint()
assert b != None
def test_blueprint_can_add_handlers_to_wood():
w = Wood()
b = make_example_blueprint()
b.to(w)
assert len(w.application.handlers) > 0
def test_blueprint_can_get_new_wood():
b = make_example_blueprint()
w = b.get_wood()
assert len(w.application.handlers) > 0
| from wood import Wood
from wood.support import Blueprint
def make_example_blueprint():
b = Blueprint()
b.empty(r"/example","example")
return b
def test_blueprint_can_add_empty_handler():
b = make_example_blueprint()
assert b != None
def test_blueprint_can_add_handlers_to_wood():
w = Wood()
b = make_example_blueprint()
b.to(w)
assert len(w.application.handlers) > 0
def test_blueprint_can_get_new_wood():
b = make_example_blueprint()
w.get_wood()
assert len(w.application.handlers) > 0
| Python | 0.002705 |
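The fixed pattern spelled out once more as straight-line code; a sketch that assumes only the Blueprint/Wood API the tests already use, and "/ping" is an arbitrary route for illustration:
b = Blueprint()
b.empty(r"/ping", "ping")
w = b.get_wood()  # Blueprint builds and returns a Wood with the handlers attached
assert len(w.application.handlers) > 0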
d5438347980b4ed3f4a798b8c1019b87691f28bd | Bump version | oi/version.py | oi/version.py | VERSION = '0.2.1'
| VERSION = '0.2.0'
| Python | 0 |
676440a464d695146361eb1bdb684e121bf41a42 | fix simple_date parsing | solution/__init__.py | solution/__init__.py | # coding=utf-8
"""
=============================
Solution
=============================
An amazing form solution
:copyright: `Juan-Pablo Scaletti <http://jpscaletti.com>`_.
:license: MIT, see LICENSE for more details.
"""
from .form import Form # noqa
from .formset import FormSet # noqa
from .fields import * # noqa
from .validators import * # noqa
from .utils import Markup, get_html_attrs, to_unicode # noqa
__version__ = '5.2.6'
| # coding=utf-8
"""
=============================
Solution
=============================
An amazing form solution
:copyright: `Juan-Pablo Scaletti <http://jpscaletti.com>`_.
:license: MIT, see LICENSE for more details.
"""
from .form import Form # noqa
from .formset import FormSet # noqa
from .fields import * # noqa
from .validators import * # noqa
from .utils import Markup, get_html_attrs, to_unicode # noqa
__version__ = '5.2.5'
| Python | 0.000345 |
c9f634ccea4b034907bb403a683945ace373c97d | Add unary + - support for atoms | solver/core/atoms.py | solver/core/atoms.py | class Atom(object):
""" Base class for any atomic type
"""
def __pos__(self):
return self
def __neg__(self):
return -1 * self
def __add__(self, other):
from .operations import Add
return Add(self, other)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
from .operations import Mul
return Mul(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, power, modulo=None):
from .operations import Pow
return Pow(self, power)
def __div__(self, other):
from .operations import Div
return Div(self, other)
def __repr__(self):
try:
return '{}({})'.format(self.__class__.__name__, self.args)
except AttributeError:
return '{}'.format(self.__class__.__name__)
class Undefined(Atom):
pass
class Number(Atom):
def __new__(cls, *args):
if len(args) == 1:
if isinstance(args[0], Number):
return args[0]
if isinstance(args[0], int):
return super(Number, cls).__new__(Integer)
class Integer(Number):
def __init__(self, value):
self.value = value
def __add__(self, other):
if isinstance(other, Integer):
return Integer(self.value + other.value)
else:
            return Atom.__add__(self, other)
def __iadd__(self, other):
return self + other
def __mul__(self, other):
if isinstance(other, Integer):
return Integer(self.value * other.value)
else:
            return Atom.__mul__(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, power, modulo=None):
if isinstance(power, Integer):
if power.value == 0 and self.value == 0:
return Undefined()
return Integer(self.value ** power.value)
else:
            return Atom.__pow__(self, power)
def __eq__(self, other):
if isinstance(other, Integer):
return self.value == other.value
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
| class Atom(object):
""" Base class for any atomic type
"""
def __add__(self, other):
from .operations import Add
return Add(self, other)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
from .operations import Mul
return Mul(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, power, modulo=None):
from .operations import Pow
return Pow(self, power)
def __div__(self, other):
from .operations import Div
return Div(self, other)
def __repr__(self):
try:
return '{}({})'.format(self.__class__.__name__, self.args)
except AttributeError:
return '{}'.format(self.__class__.__name__)
class Undefined(Atom):
pass
class Number(Atom):
def __new__(cls, *args):
if len(args) == 1:
if isinstance(args[0], Number):
return args[0]
if isinstance(args[0], int):
return super(Number, cls).__new__(Integer)
class Integer(Number):
def __init__(self, value):
self.value = value
def __add__(self, other):
if isinstance(other, Integer):
return Integer(self.value + other.value)
else:
            return Atom.__add__(self, other)
def __iadd__(self, other):
return self + other
def __mul__(self, other):
if isinstance(other, Integer):
return Integer(self.value * other.value)
else:
            return Atom.__mul__(self, other)
def __rmul__(self, other):
return self.__mul__(other)
def __pow__(self, power, modulo=None):
if isinstance(power, Integer):
if power.value == 0 and self.value == 0:
return Undefined()
return Integer(self.value ** power.value)
else:
            return Atom.__pow__(self, power)
def __eq__(self, other):
if isinstance(other, Integer):
return self.value == other.value
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
| Python | 0.001074 |
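A quick exercise of the atoms above; a sketch that stays inside Integer-only arithmetic so the lazy .operations imports are never hit:
x = Integer(5)
assert +x is x                       # the new __pos__ is the identity
assert x + Integer(2) == Integer(7)
assert x * Integer(3) == Integer(15)
assert Number(5) == x                # Number() coerces plain ints to Integer
# -x would build -1 * x through __neg__, which lands in .operations.Mul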
f6c6ff376974f604b2b4a7b62ad28fd56a264c55 | Add empty testing scaffolding. | test/test_exceptions.py | test/test_exceptions.py | #!/usr/bin/env python3
import pyglab.exceptions as ex
import unittest as ut
class TestBadRequest(ut.TestCase):
def test_throw(self):
pass
def test_statuscode(self):
pass
def test_message(self):
pass
def test_body(self):
pass
| #!/usr/bin/env python3
import pyglab.exceptions as ex
| Python | 0 |
f13982144a2a0710af8e082dd01d73f036f026fd | Use clean_system fixture on pypackage test | tests/test_pypackage.py | tests/test_pypackage.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pypackage
--------------
Tests formerly known from a unittest residing in test_generate.py named
TestPyPackage.test_cookiecutter_pypackage
"""
from __future__ import unicode_literals
import os
import subprocess
import pytest
from cookiecutter import utils
from tests.skipif_markers import skipif_travis, skipif_no_network
@pytest.fixture(scope='function')
def remove_additional_dirs(request):
"""
Remove special directories which are creating during the tests.
"""
def fin_remove_additional_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('boilerplate'):
utils.rmtree('boilerplate')
request.addfinalizer(fin_remove_additional_dirs)
@skipif_travis
@skipif_no_network
@pytest.mark.usefixtures('clean_system', 'remove_additional_dirs')
def test_cookiecutter_pypackage():
"""
Tests that https://github.com/audreyr/cookiecutter-pypackage.git works.
"""
proc = subprocess.Popen(
'git clone https://github.com/audreyr/cookiecutter-pypackage.git',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
proc = subprocess.Popen(
'cookiecutter --no-input cookiecutter-pypackage/',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
assert os.path.isdir('cookiecutter-pypackage')
assert os.path.isfile('boilerplate/README.rst')
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pypackage
--------------
Tests formerly known from a unittest residing in test_generate.py named
TestPyPackage.test_cookiecutter_pypackage
"""
from __future__ import unicode_literals
import os
import subprocess
import pytest
from cookiecutter import utils
from tests.skipif_markers import skipif_travis, skipif_no_network
@pytest.fixture(scope='function')
def remove_additional_dirs(request):
"""
Remove special directories which are creating during the tests.
"""
def fin_remove_additional_dirs():
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('boilerplate'):
utils.rmtree('boilerplate')
request.addfinalizer(fin_remove_additional_dirs)
@skipif_travis
@skipif_no_network
@pytest.mark.usefixtures('remove_additional_dirs')
def test_cookiecutter_pypackage():
"""
Tests that https://github.com/audreyr/cookiecutter-pypackage.git works.
"""
proc = subprocess.Popen(
'git clone https://github.com/audreyr/cookiecutter-pypackage.git',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
proc = subprocess.Popen(
'cookiecutter --no-input cookiecutter-pypackage/',
stdin=subprocess.PIPE,
shell=True
)
proc.wait()
assert os.path.isdir('cookiecutter-pypackage')
assert os.path.isfile('boilerplate/README.rst')
| Python | 0 |
247e8f8ce8ed4677c629affec6b9a291c730e3a2 | Use assert_equal instead of assertEqual in fail testcase. | tests/testcases/fail.py | tests/testcases/fail.py | from systest import TestCase
class FailTest(TestCase):
"""A test that always fails.
"""
count = 0
def __init__(self, name):
super(FailTest, self).__init__()
self.name = "fail_" + name
def run(self):
FailTest.count += 1
self.assert_equal(1, 0)
| from systest import TestCase
class FailTest(TestCase):
"""A test that always fails.
"""
count = 0
def __init__(self, name):
super(FailTest, self).__init__()
self.name = "fail_" + name
def run(self):
FailTest.count += 1
self.assertEqual(1, 0)
| Python | 0 |
3c2d290452a07946880fc25af917b32766f9529d | Update test script to include deposit | testsuite/front_test.py | testsuite/front_test.py | #!/usr/bin/env python2
import gevent
import requests
import json
import time
import hashlib
ip_address = "vm"
port = "3000"
url = ''.join(['http://', ip_address, ':', port])
def secret(params, secret):
keys = params.keys()
keys.sort()
hash_str = ""
for key in keys:
hash_str += (params[key])
md5 = hashlib.md5()
md5.update(hash_str)
return md5.hexdigest()
def test_login(login):
info = login
assert 'username' in info
assert 'secret' in info
assert len(info.keys()) == 2
def pytest_funcarg__login(request):
r = requests.get(url + '/register')
assert r.status_code == 200
info = json.loads(r.text)
assert 'username' in info
assert 'secret' in info
return info
def test_balance(login):
body = {}
body['username'] = login['username']
body['time'] = str(time.time())
body['sign'] = secret(body, login['secret'])
r = requests.post(url + '/balance', data=json.dumps(body))
assert r.status_code == 200
info = json.loads(r.text)
assert 'balance' in info
def test_deposit(login):
body = {}
body['username'] = login['username']
body['time'] = str(time.time())
body['sign'] = secret(body, login['secret'])
r = requests.post(url + '/deposit', data=json.dumps(body), timeout=1)
assert r.status_code == 200
info = json.loads(r.text)
assert 'address' in info
r2 = requests.post(url + '/deposit', data=json.dumps(body), timeout=1)
assert r2.text == r.text
| #!/usr/bin/env python2
import gevent
import requests
import json
import time
import hashlib
ip_address = "vm"
port = "3000"
url = ''.join(['http://', ip_address, ':', port])
def secret(params, secret):
keys = params.keys()
keys.sort()
hash_str = ""
for key in keys:
hash_str += (params[key])
md5 = hashlib.md5()
md5.update(hash_str)
return md5.hexdigest()
def test_login(login):
info = login
assert 'username' in info
assert 'secret' in info
assert len(info.keys()) == 2
def pytest_funcarg__login(request):
r = requests.get(url + '/register')
assert r.status_code == 200
info = json.loads(r.text)
assert 'username' in info
assert 'secret' in info
return info
def test_balance(login):
body = {}
body['username'] = login['username']
body['time'] = str(time.time())
body['sign'] = secret(body, login['secret'])
r = requests.post(url + '/balance', data=json.dumps(body))
print r.text
assert r.status_code == 200
info = json.loads(r.text)
assert 'balance' in info
| Python | 0 |
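The request-signing scheme checked standalone (Python 2, matching the script's shebang); note that, as written, the helper hashes only the sorted parameter values and never mixes in its secret argument:
import hashlib
body = {'username': 'alice', 'time': '1700000000.0'}
sign = secret(body, 'topsecret')
assert sign == hashlib.md5('1700000000.0' + 'alice').hexdigest()  # 'time' sorts first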
50c4b312a27b61725885ee84d45a0f07f94a8ec6 | Handle interactive-on-error also when error is from contextmanager exit. | teuthology/run_tasks.py | teuthology/run_tasks.py | import sys
import logging
log = logging.getLogger(__name__)
def _run_one_task(taskname, **kwargs):
submod = taskname
subtask = 'task'
if '.' in taskname:
(submod, subtask) = taskname.rsplit('.', 1)
parent = __import__('teuthology.task', globals(), locals(), [submod], 0)
mod = getattr(parent, submod)
fn = getattr(mod, subtask)
return fn(**kwargs)
def run_tasks(tasks, ctx):
stack = []
try:
for taskdict in tasks:
try:
((taskname, config),) = taskdict.iteritems()
except ValueError:
raise RuntimeError('Invalid task definition: %s' % taskdict)
log.info('Running task %s...', taskname)
manager = _run_one_task(taskname, ctx=ctx, config=config)
if hasattr(manager, '__enter__'):
manager.__enter__()
stack.append(manager)
except Exception, e:
ctx.summary['success'] = False
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = str(e)
log.exception('Saw exception from tasks')
if ctx.config.get('interactive-on-error'):
from .task import interactive
log.warning('Saw failure, going into interactive mode...')
interactive.task(ctx=ctx, config=None)
finally:
try:
exc_info = sys.exc_info()
while stack:
manager = stack.pop()
log.debug('Unwinding manager %s', manager)
try:
suppress = manager.__exit__(*exc_info)
except Exception, e:
ctx.summary['success'] = False
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = str(e)
log.exception('Manager failed: %s', manager)
if exc_info == (None, None, None):
# if first failure is in an __exit__, we don't
# have exc_info set yet
exc_info = sys.exc_info()
if ctx.config.get('interactive-on-error'):
from .task import interactive
log.warning('Saw failure, going into interactive mode...')
interactive.task(ctx=ctx, config=None)
else:
if suppress:
sys.exc_clear()
exc_info = (None, None, None)
if exc_info != (None, None, None):
log.debug('Exception was not quenched, exiting: %s: %s', exc_info[0].__name__, exc_info[1])
raise SystemExit(1)
finally:
# be careful about cyclic references
del exc_info
| import sys
import logging
log = logging.getLogger(__name__)
def _run_one_task(taskname, **kwargs):
submod = taskname
subtask = 'task'
if '.' in taskname:
(submod, subtask) = taskname.rsplit('.', 1)
parent = __import__('teuthology.task', globals(), locals(), [submod], 0)
mod = getattr(parent, submod)
fn = getattr(mod, subtask)
return fn(**kwargs)
def run_tasks(tasks, ctx):
stack = []
try:
for taskdict in tasks:
try:
((taskname, config),) = taskdict.iteritems()
except ValueError:
raise RuntimeError('Invalid task definition: %s' % taskdict)
log.info('Running task %s...', taskname)
manager = _run_one_task(taskname, ctx=ctx, config=config)
if hasattr(manager, '__enter__'):
manager.__enter__()
stack.append(manager)
except Exception, e:
ctx.summary['success'] = False
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = str(e)
log.exception('Saw exception from tasks')
if ctx.config.get('interactive-on-error'):
from .task import interactive
log.warning('Saw failure, going into interactive mode...')
interactive.task(ctx=ctx, config=None)
finally:
try:
exc_info = sys.exc_info()
while stack:
manager = stack.pop()
log.debug('Unwinding manager %s', manager)
try:
suppress = manager.__exit__(*exc_info)
except Exception, e:
ctx.summary['success'] = False
if 'failure_reason' not in ctx.summary:
ctx.summary['failure_reason'] = str(e)
log.exception('Manager failed: %s', manager)
if exc_info == (None, None, None):
# if first failure is in an __exit__, we don't
# have exc_info set yet
exc_info = sys.exc_info()
else:
if suppress:
sys.exc_clear()
exc_info = (None, None, None)
if exc_info != (None, None, None):
log.debug('Exception was not quenched, exiting: %s: %s', exc_info[0].__name__, exc_info[1])
raise SystemExit(1)
finally:
# be careful about cyclic references
del exc_info
| Python | 0 |
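The reverse-order unwinding run_tasks performs, reduced to its core; a standalone sketch independent of teuthology's task modules:
import sys
stack = []
class Noisy(object):
    def __init__(self, name):
        self.name = name
    def __enter__(self):
        stack.append(self)
        return self
    def __exit__(self, *exc):
        print('exit %s' % self.name)
        return False
Noisy('a').__enter__()
Noisy('b').__enter__()
exc_info = sys.exc_info()
while stack:
    stack.pop().__exit__(*exc_info)  # prints 'exit b' then 'exit a'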
e8eaa2e4bd6cc7fe51e3c4c15a5bf392a24d5b92 | generate index | hodge/__init__.py | hodge/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import io
import shutil
import click
from cookiecutter.main import cookiecutter
from slugify import slugify
from jinja2 import Template, Environment, FileSystemLoader
from datetime import datetime
import markdown2
from .utils import walk_dir
@click.group()
def cmds():
pass
@cmds.command()
@click.argument('site_name', type=str)
def init(site_name):
click.echo(u'Hodge init new project...')
repo_name = site_name.lower().replace(' ', '-')
app_name = repo_name.replace("-", "")
extra_context = {
'site_name': site_name,
"repo_name": repo_name,
"app_name": app_name,
"pkg_name": app_name,
"content_folder": "content"
}
cookiecutter(
'https://github.com/avelino/hodge-init.git',
extra_context=extra_context,
no_input=True)
@cmds.command()
def newpost():
if not os.path.isfile("./hodge.toml"):
click.echo(u'hodge.toml (config) not exist!')
exit(0)
click.echo(u'Hodge new post create...')
date = datetime.now()
obj = {}
obj["title"] = click.prompt('Title', type=str)
slug = slugify(obj["title"])
obj["slug"] = click.prompt('Slug', type=str, default=slug)
obj["tags"] = click.prompt('Tags (hodge, static)', type=str,
default=", ".join(obj["title"].split(" ")))
obj["date"] = click.prompt(
'Date', type=str,
default=date.strftime("%Y/%m/%d %H:%M:%S"))
obj["file"] = click.prompt(
"File name",
type=str,
default=date.strftime("%Y_%m_%d_{}.md".format(obj["slug"])))
template_path = os.path.join(os.path.dirname(__file__), "templates")
tmp = Template(io.open(os.path.join(template_path, "newpost.md")).read())
if not os.path.isdir("./content"):
os.mkdir("./content")
with io.open("./content/{}".format(obj["file"]), "wb") as f:
f.write(tmp.render(**obj).encode())
@cmds.command()
def build():
if not os.path.isfile("./hodge.toml"):
click.echo(u'hodge.toml (config) not exist!')
exit(0)
click.echo(u'Hodge build...')
template_path = os.path.join("theme", "default")
env = Environment(autoescape=True,
loader=FileSystemLoader(template_path))
template_content = env.get_template('content.html')
shutil.rmtree("./build", ignore_errors=True)
index = []
for filename in walk_dir("./content"):
text = io.open(filename, "rb").read()
html = markdown2.markdown(text, extras=["metadata"])
meta = html.metadata
content = {"content": html, "meta": meta}
if not os.path.isdir("./build"):
os.mkdir("./build")
with open("./build/{}.html".format(meta.get("slug")), "w") as fh:
fh.write(template_content.render(**content))
click.echo("- {}".format(meta.get("slug")))
index.append(content)
template_index = env.get_template('index.html')
content = {"posts": index}
with open("./build/index.html", "w") as fh:
fh.write(template_index.render(**content))
def main():
cmds()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import io
import shutil
import click
from cookiecutter.main import cookiecutter
from slugify import slugify
from jinja2 import Template, Environment, FileSystemLoader
from datetime import datetime
import markdown2
from .utils import walk_dir
@click.group()
def cmds():
pass
@cmds.command()
@click.argument('site_name', type=str)
def init(site_name):
click.echo(u'Hodge init new project...')
repo_name = site_name.lower().replace(' ', '-')
app_name = repo_name.replace("-", "")
extra_context = {
'site_name': site_name,
"repo_name": repo_name,
"app_name": app_name,
"pkg_name": app_name,
"content_folder": "content"
}
cookiecutter(
'https://github.com/avelino/hodge-init.git',
extra_context=extra_context,
no_input=True)
@cmds.command()
def newpost():
if not os.path.isfile("./hodge.toml"):
click.echo(u'hodge.toml (config) not exist!')
exit(0)
click.echo(u'Hodge new post create...')
date = datetime.now()
obj = {}
obj["title"] = click.prompt('Title', type=str)
slug = slugify(obj["title"])
obj["slug"] = click.prompt('Slug', type=str, default=slug)
obj["tags"] = click.prompt('Tags (hodge, static)', type=str,
default=", ".join(obj["title"].split(" ")))
obj["date"] = click.prompt(
'Date', type=str,
default=date.strftime("%Y/%m/%d %H:%M:%S"))
obj["file"] = click.prompt(
"File name",
type=str,
default=date.strftime("%Y_%m_%d_{}.md".format(obj["slug"])))
template_path = os.path.join(os.path.dirname(__file__), "templates")
tmp = Template(io.open(os.path.join(template_path, "newpost.md")).read())
if not os.path.isdir("./content"):
os.mkdir("./content")
with io.open("./content/{}".format(obj["file"]), "wb") as f:
f.write(tmp.render(**obj).encode())
@cmds.command()
def build():
if not os.path.isfile("./hodge.toml"):
click.echo(u'hodge.toml (config) not exist!')
exit(0)
click.echo(u'Hodge build...')
template_path = os.path.join("theme", "default")
env = Environment(autoescape=True,
loader=FileSystemLoader(template_path))
template = env.get_template('index.html')
shutil.rmtree("./build", ignore_errors=True)
for filename in walk_dir("./content"):
text = io.open(filename, "rb").read()
html = markdown2.markdown(text, extras=["metadata"])
meta = html.metadata
content = {"content": html, "meta": meta}
if not os.path.isdir("./build"):
os.mkdir("./build")
with open("./build/{}.html".format(meta.get("slug")), "w") as fh:
fh.write(template.render(**content))
click.echo("- {}".format(meta.get("slug")))
def main():
cmds()
| Python | 0.999996 |
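The front-matter handling the build step leans on, shown with markdown2 alone; the metadata extra parses a leading --- block into the .metadata dict:
import markdown2
html = markdown2.markdown("---\ntitle: Hi\nslug: hi\n---\n\n# Hi\n",
                          extras=["metadata"])
assert html.metadata["slug"] == "hi"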
f6c0258e257aa537dbb64bb8c5f10c87ec32dcf9 | Update my_hooks.py | hooks/my_hooks.py | hooks/my_hooks.py | #!/usr/bin/python
"""
Example Diaphora export hooks script. In this example script the following fake
scenario is considered:
1) There is a something-user.i64 database, for user-land stuff.
2) There is a something-kernel.i64 database, for kernel-land stuff.
3) We export all functions from the something-user.i64 database.
4) We only export from something-kernel.i64 the syscall_* or sys_* prefixed
functions.
5) In both databases there are constants referencing the build identifier but
they are different for both databases: BUILD-1000 in the user-land part and
BUILD-2000 in the kernel-land part. For making a perfect match based on the
constants found in both databases, we change the strings BUILD-XXX to the
generic string "BUILD-ID" for both databases.
"""
#-----------------------------------------------------------------------
FUNC_PREFIXES = ["syscall_", "sys_"]
BUILD_IDS = ["BUILD-1000", "BUILD-2000"]
#-----------------------------------------------------------------------
class CMyHooks:
def __init__(self, diaphora_obj):
""" @diaphora_obj is the CIDABinDiff object being used.
"""
self.diaphora = diaphora_obj
self.db_name = self.diaphora.db_name
def before_export_function(self, ea, func_name):
""" @ea is the address of the function that is going to be read.
Return True for the function to be read, or False to ignore it.
"""
# If the IDB name has the word 'user' on it, it's the user-land database for
# which we want to export everything.
if self.db_name.find("user") > -1:
return True
# Otherwise, it's the kernel-land IDB for which we only want to export the
# syscall functions.
if func_name:
# Is it a syscall?
for prefix in FUNC_PREFIXES:
if func_name.startswith(prefix):
return True
return False
def after_export_function(self, d):
""" @d is a dictionary with everything exported by Diaphora for the current
function. Transformations can be applied to the dictionary like changing
some strings or constants or whatever else. The function must return a
new dictionary with the modifications.
"""
# Search if any of the constants in the dictionary has the string "BUILD-*"
# and, if so, change it in the export process to a generic "BUILD-ID" string
# that will match more functions.
for build_id in BUILD_IDS:
for key in d:
if type(d[key]) is str:
if d[key].find(build_id) > -1:
d[key] = d[key].replace(build_id, "GENERIC-BUILD-ID")
return d
HOOKS = {"DiaphoraHooks": CMyHooks}
| #!/usr/bin/python
"""
Example Diaphora export hooks script. In this example script the following fake
scenario is considered:
1) There is a something-user.i64 database, for user-land stuff.
2) There is a something-kernel.i64 database, for kernel-land stuff.
3) We export all functions from the something-user.i64 database.
4) We only export from something-kernel.i64 the syscall_* or sys_* prefixed
functions.
5) In both databases there are constants referencing the build identifier but
they are different for both databases: BUILD-1000 in the user-land part and
BUILD-2000 in the kernel-land part. For making a perfect match based on the
constants found in both databases, we change the strings BUILD-XXX to the
generic string "BUILD-ID" for both databases.
"""
#-----------------------------------------------------------------------
FUNC_PREFIXES = ["syscall_", "sys_"]
BUILD_IDS = ["BUILD-1000", "BUILD-2000"]
#-----------------------------------------------------------------------
class CMyHooks:
def __init__(self, diaphora_obj):
""" @diaphora_obj is the CIDABinDiff object being used.
"""
self.diaphora = diaphora_obj
self.db_name = self.diaphora.db_name
def before_export_function(self, ea, func_name):
""" @ea is the address of the function that is going to be read.
Return True for the function to be read, or False to ignore it.
"""
# If the IDB name has the word 'user' on it, it's the user-land database for
# which we want to export everything.
if self.db_name.find("user") > -1:
return True
# Otherwise, it's the kernel-land IDB for which we only want to export the
# syscall functions.
if func_name:
# It is a syscall
if "syscall_%s" % func_name in SYSCALL_NAMES:
return True
return False
def after_export_function(self, d):
""" @d is a dictionary with everything exported by Diaphora for the current
function. Transformations can be applied to the dictionary like changing
some strings or constants or whatever else. The function must return a
new dictionary with the modifications.
"""
# Search if any of the constants in the dictionary has the string "BUILD-*"
# and, if so, change it in the export process to a generic "BUILD-ID" string
# that will match more functions.
for build_id in BUILD_IDS:
for key in d:
if type(d[key]) is str:
if d[key].find(build_id) > -1:
d[key] = d[key].replace(build_id, "GENERIC-BUILD-ID")
return d
HOOKS = {"DiaphoraHooks": CMyHooks}
| Python | 0.000001 |
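How the HOOKS registry is presumably consumed; the stand-in object below is an assumption, since the loading side lives in Diaphora itself rather than in this record:
class _FakeDiaphora(object):  # stand-in for the real CIDABinDiff object
    db_name = "something-kernel.i64"
hooks = HOOKS["DiaphoraHooks"](_FakeDiaphora())
assert hooks.before_export_function(0x401000, "syscall_open") is True
assert hooks.before_export_function(0x401000, "main") is False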
4ab33cec7c0f4ee9fee7a7dce1c28466780b7074 | Add hoomd.box.Box to main namespace | hoomd/__init__.py | hoomd/__init__.py | # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
""" HOOMD-blue python API
:py:mod:`hoomd` provides a high level user interface for defining and executing
simulations using HOOMD.
.. rubric:: API stability
:py:mod:`hoomd` is **stable**. When upgrading from version 3.x to 3.y (y > x),
existing job scripts that follow *documented* interfaces for functions and
classes will not require any modifications.
**Maintainer:** Joshua A. Anderson
"""
# Maintainer: joaander
import sys;
import ctypes;
import os;
# need to import HOOMD with RTLD_GLOBAL in python sitedir builds
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
flags = sys.getdlopenflags();
sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL);
from hoomd import _hoomd;
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
sys.setdlopenflags(flags);
from hoomd import meta
from hoomd import context
from hoomd import cite
from hoomd import analyze
from hoomd import benchmark
from hoomd import comm
from hoomd import compute
from hoomd import data
from hoomd import dump
from hoomd import group
from hoomd import init
from hoomd import integrate
from hoomd import option
from hoomd import update
from hoomd import util
from hoomd import variant
from hoomd import lattice
from hoomd import device
try:
from hoomd import md
except ImportError:
pass
try:
from hoomd import hpmc
except ImportError:
pass
try:
from hoomd import dem
except ImportError:
pass
# TODO: enable this import after updating MPCD to the new API
# try:
# from hoomd import mpcd
# except ImportError:
# pass
from hoomd.simulation import Simulation
from hoomd.state import State
from hoomd.operations import Operations
from hoomd.snapshot import Snapshot
from hoomd.logger import Logger
from hoomd.box import Box
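# Re-exporting hoomd.box.Box above makes it reachable as hoomd.Box; as a usage
# sketch (assuming the 3.x factory API), users can then write e.g.
#   box = Box.cube(10)
# without importing hoomd.box themselves.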
from hoomd import tuner
from hoomd._hoomd import WalltimeLimitReached;
_default_excepthook = sys.excepthook;
## \internal
# \brief Override Python's excepthook to abort MPI runs
def _hoomd_sys_excepthook(type, value, traceback):
_default_excepthook(type, value, traceback);
sys.stderr.flush();
if context.current.device is not None:
_hoomd.abort_mpi(context.current.device.cpp_exec_conf);
sys.excepthook = _hoomd_sys_excepthook
__version__ = "{0}.{1}.{2}".format(*_hoomd.__version__)
| # Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
""" HOOMD-blue python API
:py:mod:`hoomd` provides a high level user interface for defining and executing
simulations using HOOMD.
.. rubric:: API stability
:py:mod:`hoomd` is **stable**. When upgrading from version 3.x to 3.y (y > x),
existing job scripts that follow *documented* interfaces for functions and
classes will not require any modifications.
**Maintainer:** Joshua A. Anderson
"""
# Maintainer: joaander
import sys;
import ctypes;
import os;
# need to import HOOMD with RTLD_GLOBAL in python sitedir builds
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
flags = sys.getdlopenflags();
sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL);
from hoomd import _hoomd;
if not ('NOT_HOOMD_PYTHON_SITEDIR' in os.environ):
sys.setdlopenflags(flags);
from hoomd import meta
from hoomd import context
from hoomd import cite
from hoomd import analyze
from hoomd import benchmark
from hoomd import comm
from hoomd import compute
from hoomd import data
from hoomd import dump
from hoomd import group
from hoomd import init
from hoomd import integrate
from hoomd import option
from hoomd import update
from hoomd import util
from hoomd import variant
from hoomd import lattice
from hoomd import device
try:
from hoomd import md
except ImportError:
pass
try:
from hoomd import hpmc
except ImportError:
pass
try:
from hoomd import dem
except ImportError:
pass
# TODO: enable this import after updating MPCD to the new API
# try:
# from hoomd import mpcd
# except ImportError:
# pass
from hoomd.simulation import Simulation
from hoomd.state import State
from hoomd.operations import Operations
from hoomd.snapshot import Snapshot
from hoomd.logger import Logger
from hoomd import tuner
from hoomd._hoomd import WalltimeLimitReached;
_default_excepthook = sys.excepthook;
## \internal
# \brief Override Python's excepthook to abort MPI runs
def _hoomd_sys_excepthook(type, value, traceback):
_default_excepthook(type, value, traceback);
sys.stderr.flush();
if context.current.device is not None:
_hoomd.abort_mpi(context.current.device.cpp_exec_conf);
sys.excepthook = _hoomd_sys_excepthook
__version__ = "{0}.{1}.{2}".format(*_hoomd.__version__)
| Python | 0.000003 |
c7ecf728e12dd3a59f4ef45e30b61ce5c52ceca5 | Fix corpus to Polish language | analysis/textclassification/bagofwords.py | analysis/textclassification/bagofwords.py | import functools
from nltk import ngrams
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
import nltk.corpus
import re
import definitions
INVALID_TOKEN_PATTERN = r'^[!%"%\*\(\)\+,&#-\.\$/\d:;\?\<\>\=@\[\]].*'
NEGATION_TOKEN_PATTERN = r'^nie$'
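# note: inside INVALID_TOKEN_PATTERN's character class, '#-\.' acts as a range
# (codepoints 35-46), so it also covers $ % & ' ( ) * + , - and the dot itself.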
def get_stopwords_list():
return list(nltk.corpus.stopwords.words('polish'))
def filter_stopwords(words):
polish_stopwords = get_stopwords_list()
return [w for w in words if w not in polish_stopwords]
def filter_custom_set(words, custom_set):
r = re.compile(custom_set)
words = list(filter(lambda w: not r.match(w), words))
return words
def include_significant_bigrams(words, score_fn=BigramAssocMeasures.likelihood_ratio, n=100):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return list(words + bigrams)
def get_all_lowercase(words):
return [x.lower() for x in words]
def get_bag_of_words(words):
return dict([(word, True) for word in words])
def mark_negations(words):
add_negation_suffix = False
r_negation = re.compile(NEGATION_TOKEN_PATTERN)
r_stopword = re.compile(INVALID_TOKEN_PATTERN)
for index, item in enumerate(words):
if (r_stopword.match(item)):
add_negation_suffix = False
continue
if (r_negation.match(item)):
add_negation_suffix = True
continue
if (add_negation_suffix):
words[index] = words[index] + "_NEG"
return words
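# Worked example: mark_negations(['nie', 'lubie', 'tego', '.']) returns
# ['nie', 'lubie_NEG', 'tego_NEG', '.'] -- suffixing starts after the negation
# token, and the punctuation token resets it (it matches INVALID_TOKEN_PATTERN).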
def get_processed_bag_of_words(text, lemmatizer, settings):
words = nltk.tokenize.word_tokenize(text, 'polish')
words = get_all_lowercase(words)
if lemmatizer is not None:
words = [lemmatizer.get_lemma(word) for word in words]
if (settings.FILTER_STOPWORDS):
words = filter_stopwords(words)
words = mark_negations(words)
words = filter_custom_set(words, INVALID_TOKEN_PATTERN)
if settings.MAX_FEATURES > 0:
words = words[:settings.MAX_FEATURES]
words = functools.reduce(lambda x, y: x + y,
[words if n == 1 else list([' '.join(ngram) for ngram in ngrams(words, n)]) for n in
range(1, settings.MAX_NGRAMS + 1)])
return get_bag_of_words(words)
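# Usage sketch: with settings.MAX_NGRAMS = 2, the token list ['kot', 'pies']
# expands to the feature dict {'kot': True, 'pies': True, 'kot pies': True}.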
| import functools
from nltk import ngrams
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
import nltk.corpus
import re
import definitions
INVALID_TOKEN_PATTERN = r'^[!%"%\*\(\)\+,&#-\.\$/\d:;\?\<\>\=@\[\]].*'
NEGATION_TOKEN_PATTERN = r'^nie$'
def get_stopwords_list():
return list(nltk.corpus.stopwords.words('polish'))
def filter_stopwords(words):
polish_stopwords = get_stopwords_list()
return [w for w in words if w not in polish_stopwords]
def filter_custom_set(words, custom_set):
r = re.compile(custom_set)
words = list(filter(lambda w: not r.match(w), words))
return words
def include_significant_bigrams(words, score_fn=BigramAssocMeasures.likelihood_ratio, n=100):
bigram_finder = BigramCollocationFinder.from_words(words)
bigrams = bigram_finder.nbest(score_fn, n)
return list(words + bigrams)
def get_all_lowercase(words):
return [x.lower() for x in words]
def get_bag_of_words(words):
return dict([(word, True) for word in words])
def mark_negations(words):
add_negation_suffix = False
r_negation = re.compile(NEGATION_TOKEN_PATTERN)
r_stopword = re.compile(INVALID_TOKEN_PATTERN)
for index, item in enumerate(words):
if (r_stopword.match(item)):
add_negation_suffix = False
continue
if (r_negation.match(item)):
add_negation_suffix = True
continue
if (add_negation_suffix):
words[index] = words[index] + "_NEG"
return words
def get_processed_bag_of_words(text, lemmatizer, settings):
words = nltk.tokenize.word_tokenize(text)
words = get_all_lowercase(words)
if lemmatizer is not None:
words = [lemmatizer.get_lemma(word) for word in words]
if (settings.FILTER_STOPWORDS):
words = filter_stopwords(words)
words = mark_negations(words)
words = filter_custom_set(words, INVALID_TOKEN_PATTERN)
if settings.MAX_FEATURES > 0:
words = words[:settings.MAX_FEATURES]
words = functools.reduce(lambda x, y: x + y,
[words if n == 1 else list([' '.join(ngram) for ngram in ngrams(words, n)]) for n in
range(1, settings.MAX_NGRAMS + 1)])
return get_bag_of_words(words)
| Python | 0.999998 |
de027652a4bb12c6d1a4cb7bc85448c8c2a0d321 | use argparse to get arguments from command line | sortroms/__main__.py | sortroms/__main__.py | from sortroms import main
import argparse
parser = argparse.ArgumentParser(
description='Sort emulator ROM files',
prog='sortroms'
)
parser.add_argument(
'folder',
metavar='DIR',
type=str,
nargs='?',
help='The ROM folder to sort.'
)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
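# Invocation sketch: `python -m sortroms ~/roms` puts '~/roms' into args.folder
# (or None when DIR is omitted, since nargs='?') before handing off to main().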
| from sortroms import main
if __name__ == '__main__':
main()
| Python | 0.000001 |
56151ad549e82206c3894e8c945c00d4bac24ab5 | Complete error message. | source/memex/rest.py | source/memex/rest.py | from rest_framework import routers, serializers, viewsets, parsers, filters
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile
from base.models import Project
from apps.crawl_space.models import Crawl, CrawlModel
class SlugModelSerializer(serializers.ModelSerializer):
slug = serializers.SlugField(required=False, read_only=True)
class ProjectSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
class Meta:
model = Project
class CrawlSerializer(SlugModelSerializer):
# Expose these fields, but only as read only.
id = serializers.ReadOnlyField()
seeds_list = serializers.FileField(use_url=False)
status = serializers.CharField(read_only=True)
config = serializers.CharField(read_only=True)
index_name = serializers.CharField(read_only=True)
url = serializers.CharField(read_only=True)
pages_crawled = serializers.IntegerField(read_only=True)
harvest_rate = serializers.FloatField(read_only=True)
location = serializers.CharField(read_only=True)
def validate_crawler(self, value):
if value == "ache" and not self.initial_data.get("crawl_model"):
raise serializers.ValidationError("Ache crawls require a Crawl Model.")
return value
class Meta:
model = Crawl
class CrawlModelSerializer(SlugModelSerializer):
model = serializers.FileField(use_url=False)
features = serializers.FileField(use_url=False)
url = serializers.CharField(read_only=True)
class Meta:
model = CrawlModel
"""
Viewset Classes.
Filtering is provided by django-filter.
Backend settings are in common_settings.py under REST_FRAMEWORK. Setting is:
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
This backend is supplied to every viewset by default. Alter query fields by adding
or removing items from filter_fields
"""
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
filter_fields = ('id', 'slug', 'name',)
class CrawlViewSet(viewsets.ModelViewSet):
queryset = Crawl.objects.all()
serializer_class = CrawlSerializer
filter_fields = ('id', 'slug', 'name', 'description', 'status', 'project',
'crawl_model', 'crawler',)
def create(self, request):
if request.data.get('textseeds', False) and not request.FILES.get("seeds_list", False):
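            # Wrap the raw textarea payload in an in-memory upload so the
            # serializer's FileField treats it like a real file; the third
            # positional argument of SimpleUploadedFile is the content type.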
request.data["seeds_list"] = SimpleUploadedFile(
'seeds',
bytes(request.data.get("textseeds")),
'utf-8'
)
return super(CrawlViewSet, self).create(request)
class CrawlModelViewSet(viewsets.ModelViewSet):
queryset = CrawlModel.objects.all()
serializer_class = CrawlModelSerializer
filter_fields = ('id', 'slug', 'name', 'project',)
def destroy(self, request, pk=None):
crawls = Crawl.objects.all().filter(crawl_model=pk)
if crawls:
message = "The Crawl Model is being used by the following Crawls and cannot be deleted: "
raise serializers.ValidationError({message: [x.name for x in crawls]})
return super(CrawlModelViewSet, self).destroy(request)
router = routers.DefaultRouter()
router.register(r"projects", ProjectViewSet)
router.register(r"crawls", CrawlViewSet)
router.register(r"crawl_models", CrawlModelViewSet)
| from rest_framework import routers, serializers, viewsets, parsers, filters
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile, InMemoryUploadedFile
from base.models import Project
from apps.crawl_space.models import Crawl, CrawlModel
class SlugModelSerializer(serializers.ModelSerializer):
slug = serializers.SlugField(required=False, read_only=True)
class ProjectSerializer(SlugModelSerializer):
url = serializers.CharField(read_only=True)
class Meta:
model = Project
class CrawlSerializer(SlugModelSerializer):
# Expose these fields, but only as read only.
id = serializers.ReadOnlyField()
seeds_list = serializers.FileField(use_url=False)
status = serializers.CharField(read_only=True)
config = serializers.CharField(read_only=True)
index_name = serializers.CharField(read_only=True)
url = serializers.CharField(read_only=True)
pages_crawled = serializers.IntegerField(read_only=True)
harvest_rate = serializers.FloatField(read_only=True)
location = serializers.CharField(read_only=True)
def validate_crawler(self, value):
if value == "ache" and not self.initial_data.get("crawl_model"):
raise serializers.ValidationError("Ache crawls require a Crawl Model.")
return value
class Meta:
model = Crawl
class CrawlModelSerializer(SlugModelSerializer):
model = serializers.FileField(use_url=False)
features = serializers.FileField(use_url=False)
url = serializers.CharField(read_only=True)
class Meta:
model = CrawlModel
"""
Viewset Classes.
Filtering is provided by django-filter.
Backend settings are in common_settings.py under REST_FRAMEWORK. Setting is:
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
This backend is supplied to every viewset by default. Alter query fields by adding
or removing items from filter_fields
"""
class ProjectViewSet(viewsets.ModelViewSet):
queryset = Project.objects.all()
serializer_class = ProjectSerializer
filter_fields = ('id', 'slug', 'name',)
class CrawlViewSet(viewsets.ModelViewSet):
queryset = Crawl.objects.all()
serializer_class = CrawlSerializer
filter_fields = ('id', 'slug', 'name', 'description', 'status', 'project',
'crawl_model', 'crawler',)
def create(self, request):
if request.data.get('textseeds', False) and not request.FILES.get("seeds_list", False):
request.data["seeds_list"] = SimpleUploadedFile(
'seeds',
bytes(request.data.get("textseeds")),
'utf-8'
)
return super(CrawlViewSet, self).create(request)
class CrawlModelViewSet(viewsets.ModelViewSet):
queryset = CrawlModel.objects.all()
serializer_class = CrawlModelSerializer
filter_fields = ('id', 'slug', 'name', 'project',)
def destroy(self, request, pk=None):
crawls = Crawl.objects.all().filter(crawl_model=pk)
if crawls:
raise serializers.ValidationError("Cannot delete")
return super(CrawlModelViewSet, self).destroy(request)
router = routers.DefaultRouter()
router.register(r"projects", ProjectViewSet)
router.register(r"crawls", CrawlViewSet)
router.register(r"crawl_models", CrawlModelViewSet)
| Python | 0 |
25c242ed3352bcf52683454e12c3e9ae7e51622b | Check line endings for all modified files with 'binary' attr not set. | hooks.d/line_endings.py | hooks.d/line_endings.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
#
# ==================================================================
#
# Copyright (c) 2016, Parallels IP Holdings GmbH
# Released under the terms of MIT license (see LICENSE for details)
#
# ==================================================================
#
'''
line_endings: A hook to deny committing files with mixed line endings
'''
import logging
import hookutil
class Hook(object):
def __init__(self, repo_dir, settings, params):
self.repo_dir = repo_dir
self.settings = settings
self.params = params
def check(self, branch, old_sha, new_sha):
logging.debug("branch='%s', old_sha='%s', new_sha='%s', params='%s'",
branch, old_sha, new_sha, self.params)
permit = True
# Do not run the hook if the branch is being deleted
if new_sha == '0' * 40:
logging.debug("Deleting the branch, skip the hook")
return True, []
# Before the hook is run git has already created
# a new_sha commit object
log = hookutil.parse_git_log(self.repo_dir, branch, old_sha, new_sha)
messages = []
for commit in log:
modfiles = hookutil.parse_git_show(self.repo_dir, commit['commit'])
def has_mixed_le(file_contents):
'''
Check if file contains both lf and crlf
file_contents = open(file).read()
'''
if ('\r\n' in file_contents and
'\n' in file_contents.replace('\r\n', '')):
return True
return False
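            # e.g. has_mixed_le('a\r\nb\nc') -> True, while uniformly CRLF or
            # uniformly LF content returns False.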
for modfile in modfiles:
# Skip deleted files
if modfile['status'] == 'D':
logging.debug("Deleted '%s', skip", modfile['path'])
continue
binary_attr = hookutil.get_attr(
self.repo_dir, new_sha, modfile['path'], 'binary')
if binary_attr != 'set':
cmd = ['git', 'show', modfile['new_blob']]
_, file_contents, _ = hookutil.run(cmd, self.repo_dir)
permit_file = not has_mixed_le(file_contents)
if not permit_file:
messages.append({'at': commit['commit'],
'text': "Error: file '%s' has mixed line endings (CRLF/LF)" % modfile['path']})
permit = permit and permit_file
logging.debug("modfile='%s', permit='%s'", modfile['path'], permit)
return permit, messages
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
#
# ==================================================================
#
# Copyright (c) 2016, Parallels IP Holdings GmbH
# Released under the terms of MIT license (see LICENSE for details)
#
# ==================================================================
#
'''
line_endings: A hook to deny committing files with mixed line endings
'''
import logging
import hookutil
class Hook(object):
def __init__(self, repo_dir, settings, params):
self.repo_dir = repo_dir
self.settings = settings
self.params = params
def check(self, branch, old_sha, new_sha):
logging.debug("branch='%s', old_sha='%s', new_sha='%s', params='%s'",
branch, old_sha, new_sha, self.params)
permit = True
# Do not run the hook if the branch is being deleted
if new_sha == '0' * 40:
logging.debug("Deleting the branch, skip the hook")
return True, []
# Before the hook is run git has already created
# a new_sha commit object
log = hookutil.parse_git_log(self.repo_dir, branch, old_sha, new_sha)
messages = []
for commit in log:
modfiles = hookutil.parse_git_show(self.repo_dir, commit['commit'])
def has_mixed_le(file_contents):
'''
Check if file contains both lf and crlf
file_contents = open(file).read()
'''
if ('\r\n' in file_contents and
'\n' in file_contents.replace('\r\n', '')):
return True
return False
for modfile in modfiles:
# Skip deleted files
if modfile['status'] == 'D':
logging.debug("Deleted '%s', skip", modfile['path'])
continue
text_attr = hookutil.get_attr(
self.repo_dir, new_sha, modfile['path'], 'text')
# Attr 'text' enables eol normalization, so
# the file won't have crlf when the attr is set
if text_attr == 'unspecified':
cmd = ['git', 'show', modfile['new_blob']]
_, file_contents, _ = hookutil.run(cmd, self.repo_dir)
permit_file = not has_mixed_le(file_contents)
if not permit_file:
messages.append({'at': commit['commit'],
'text': "Error: file '%s' has mixed line endings (CRLF/LF)" % modfile['path']})
permit = permit and permit_file
logging.debug("modfile='%s', permit='%s'", modfile['path'], permit)
return permit, messages
| Python | 0 |
19d81520a7fe9dd8098bd1603b455f08e465c5f7 | add getspire to init | hsadownload/__init__.py | hsadownload/__init__.py |
__all__ = ['access', 'getpacs', 'getspire']
from hsadownload import access, getpacs, getspire
|
__all__ = ['access', 'getpacs']
from hsadownload import access, getpacs
| Python | 0 |
54b40488a7b0baefba3ada33cf9b792af1c2ca4d | fix bug with api v1 | people/api.py | people/api.py | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from common.api import WebsiteResource
from .models import Artist, Staff, Organization
class UserResource(ModelResource):
class Meta:
queryset = User.objects.exclude(pk=-1) # Exclude anonymous user
detail_uri_name = 'username'
resource_name = 'people/user'
fields = ['username', 'first_name', 'last_name', 'id']
filtering = {
'first_name': ALL,
'last_name': ALL
}
def dehydrate(self, bundle):
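        # Some User rows may lack a related profile (legacy or service
        # accounts, for instance -- an assumption), so profile fields are
        # only copied when the relation exists.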
if hasattr(bundle.obj, 'profile'):
bundle.data['photo'] = bundle.obj.profile.photo
bundle.data['birthdate'] = bundle.obj.profile.birthdate
bundle.data['birthplace'] = bundle.obj.profile.birthplace
bundle.data['cursus'] = bundle.obj.profile.cursus
bundle.data['gender'] = bundle.obj.profile.gender
# Nationality : country code separated by commas
bundle.data['nationality'] = bundle.obj.profile.nationality
bundle.data['homeland_country'] = bundle.obj.profile.homeland_country
bundle.data['birthplace_country'] = bundle.obj.profile.birthplace_country
return bundle
class ArtistResource(ModelResource):
class Meta:
queryset = Artist.objects.all()
resource_name = 'people/artist'
filtering = {
'user': ALL_WITH_RELATIONS,
'resource_uri': ALL
}
fields = ['id', 'nickname', 'bio_short_fr', 'bio_short_en',
'bio_fr', 'bio_en', 'twitter_account', 'facebook_profile']
websites = fields.ToManyField(WebsiteResource, 'websites', full=True)
user = fields.ForeignKey(UserResource, 'user', full=True)
artworks = fields.ToManyField('production.api.ArtworkResource', 'artworks',
full=False, null=True, use_in=['detail'])
class StaffResource(ModelResource):
class Meta:
queryset = Staff.objects.all()
resource_name = 'people/staff'
fields = ('user',)
user = fields.ForeignKey(UserResource, 'user', full=True)
class OrganizationResource(ModelResource):
class Meta:
queryset = Organization.objects.all()
resource_name = 'people/organization'
| # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from common.api import WebsiteResource
from .models import Artist, Staff, Organization
class UserResource(ModelResource):
class Meta:
queryset = User.objects.exclude(pk=-1) # Exclude anonymous user
detail_uri_name = 'username'
resource_name = 'people/user'
fields = ['username', 'first_name', 'last_name', 'id']
filtering = {
'first_name': ALL,
'last_name': ALL
}
def dehydrate(self, bundle):
bundle.data['photo'] = bundle.obj.profile.photo
bundle.data['birthdate'] = bundle.obj.profile.birthdate
bundle.data['birthplace'] = bundle.obj.profile.birthplace
bundle.data['cursus'] = bundle.obj.profile.cursus
bundle.data['gender'] = bundle.obj.profile.gender
# Nationality : country code separated by commas
bundle.data['nationality'] = bundle.obj.profile.nationality
bundle.data['homeland_country'] = bundle.obj.profile.homeland_country
bundle.data['birthplace_country'] = bundle.obj.profile.birthplace_country
return bundle
class ArtistResource(ModelResource):
class Meta:
queryset = Artist.objects.all()
resource_name = 'people/artist'
filtering = {
'user': ALL_WITH_RELATIONS,
'resource_uri': ALL
}
fields = ['id', 'nickname', 'bio_short_fr', 'bio_short_en',
'bio_fr', 'bio_en', 'twitter_account', 'facebook_profile']
websites = fields.ToManyField(WebsiteResource, 'websites', full=True)
user = fields.ForeignKey(UserResource, 'user', full=True)
artworks = fields.ToManyField('production.api.ArtworkResource', 'artworks',
full=False, null=True, use_in=['detail'])
class StaffResource(ModelResource):
class Meta:
queryset = Staff.objects.all()
resource_name = 'people/staff'
fields = ('user',)
user = fields.ForeignKey(UserResource, 'user', full=True)
class OrganizationResource(ModelResource):
class Meta:
queryset = Organization.objects.all()
resource_name = 'people/organization'
| Python | 0 |
59b2d0418c787066c37904816925dad15b0b45cf | Use author display name in document list_filter | scanblog/scanning/admin.py | scanblog/scanning/admin.py | from django.contrib import admin
from scanning.models import PendingScan, Document, DocumentPage, Scan, ScanPage, Transcription
class ScanPageInline(admin.TabularInline):
model = ScanPage
class ScanAdmin(admin.ModelAdmin):
model = Scan
inlines = [ScanPageInline]
admin.site.register(Scan, ScanAdmin)
class PendingScanAdmin(admin.ModelAdmin):
model = PendingScan
list_display = ('author', 'editor', 'code', 'created', 'completed')
search_fields = ('code',)
admin.site.register(PendingScan, PendingScanAdmin)
class DocumentAdmin(admin.ModelAdmin):
list_display = ['title', 'author', 'status', 'created']
search_fields = ['title', 'author__profile__display_name',
'body', 'transcription__revisions__body']
date_hierarchy = 'created'
list_filter = ['type', 'status', 'author__profile__managed',
'author__profile__display_name']
admin.site.register(Document, DocumentAdmin)
admin.site.register(DocumentPage)
admin.site.register(Transcription)
| from django.contrib import admin
from scanning.models import PendingScan, Document, DocumentPage, Scan, ScanPage, Transcription
class ScanPageInline(admin.TabularInline):
model = ScanPage
class ScanAdmin(admin.ModelAdmin):
model = Scan
inlines = [ScanPageInline]
admin.site.register(Scan, ScanAdmin)
class PendingScanAdmin(admin.ModelAdmin):
model = PendingScan
list_display = ('author', 'editor', 'code', 'created', 'completed')
search_fields = ('code',)
admin.site.register(PendingScan, PendingScanAdmin)
class DocumentAdmin(admin.ModelAdmin):
list_display = ['title', 'author', 'status', 'created']
search_fields = ['title', 'author__profile__display_name',
'body', 'transcription__revisions__body']
date_hierarchy = 'created'
list_filter = ['type', 'status', 'author', 'author__profile__managed']
admin.site.register(Document, DocumentAdmin)
admin.site.register(DocumentPage)
admin.site.register(Transcription)
| Python | 0.000001 |
3bc11eea2d629b316eb9a8bdf4d9c2a2c801ddf5 | Remove unused imports | whylog/tests/tests_front/tests_whylog_factory.py | whylog/tests/tests_front/tests_whylog_factory.py | from whylog.config.investigation_plan import LineSource
from whylog.front.whylog_factory import whylog_factory
from whylog.front.utils import FrontInput
from whylog.tests.utils import TestRemovingSettings
class TestWhylogFactory(TestRemovingSettings):
def tests_whylog_factory(self):
log_reader, teacher_generator = whylog_factory()
teacher = teacher_generator()
front_input = FrontInput(1, 'line content', LineSource('host', 'path'))
log_reader.get_causes(front_input)
teacher.add_line(0, front_input, True)
| from whylog.config.investigation_plan import LineSource
from whylog.front.whylog_factory import whylog_factory
from whylog.front.utils import FrontInput
from whylog.log_reader import LogReader
from whylog.teacher import Teacher
from whylog.tests.utils import TestRemovingSettings
class TestWhylogFactory(TestRemovingSettings):
def tests_whylog_factory(self):
log_reader, teacher_generator = whylog_factory()
teacher = teacher_generator()
front_input = FrontInput(1, 'line content', LineSource('host', 'path'))
log_reader.get_causes(front_input)
teacher.add_line(0, front_input, True)
| Python | 0.000001 |
b875084e74ee03c6b251a79f04f0db340bb356b8 | Fix #604 | scout/constants/indexes.py | scout/constants/indexes.py | from pymongo import (IndexModel, ASCENDING, DESCENDING)
INDEXES = {
'hgnc_collection': [
IndexModel([
('build', ASCENDING),
('chromosome', ASCENDING)],
name="build_chromosome"),
],
'variant_collection': [
IndexModel([
('case_id', ASCENDING),
('rank_score', DESCENDING)],
name="caseid_rankscore"),
IndexModel([
('case_id', ASCENDING),
('variant_rank', ASCENDING)],
name="caseid_variantrank"),
IndexModel([
('case_id', ASCENDING),
('category', ASCENDING),
('variant_type', ASCENDING),
('rank_score', DESCENDING)],
name="caseid_category_varianttype_rankscore"),
IndexModel([
('case_id', ASCENDING),
('variant_id', ASCENDING)],
name="caseid_variantid"),
IndexModel([
('case_id', ASCENDING),
('variant_type', ASCENDING),
('variant_rank', ASCENDING),
('panels', ASCENDING),
('thousand_genomes_frequency', ASCENDING)],
name="caseid_varianttype_variantrank_panels_thousandg")
],
}
| from pymongo import (IndexModel, ASCENDING, DESCENDING)
INDEXES = {
'hgnc_collection': [IndexModel(
[('build', ASCENDING), ('chromosome', ASCENDING)], name="build_chromosome"),
],
'variant_collection': [
IndexModel([('case_id', ASCENDING),('rank_score', DESCENDING)], name="caseid_rankscore"),
IndexModel([('case_id', ASCENDING),('variant_rank', ASCENDING)], name="caseid_variantrank")
]
}
| Python | 0.000001 |
87371774ad332a3adbe927e2609d73710f4a7678 | change method name | tests/graph/test_dag.py | tests/graph/test_dag.py | from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.node import Node
from pybbn.graph.dag import Dag
from nose import with_setup
def setup():
pass
def teardown():
pass
@with_setup(setup, teardown)
def test_dag_creation():
n0 = Node(0)
n1 = Node(1)
n2 = Node(2)
e0 = Edge(n0, n1, EdgeType.DIRECTED)
e1 = Edge(n1, n2, EdgeType.DIRECTED)
e2 = Edge(n2, n0, EdgeType.DIRECTED)
g = Dag()
g.add_node(n0)
g.add_node(n1)
g.add_edge(e0)
g.add_edge(e1)
g.add_edge(e2)
print(g)
assert len(g.get_nodes()) == 3
assert len(g.get_edges()) == 2
assert len(list(g.get_neighbors(0))) == 1
assert len(list(g.get_neighbors(1))) == 2
assert len(list(g.get_neighbors(2))) == 1
assert 1 in g.get_neighbors(0)
assert 0 in g.get_neighbors(1)
assert 2 in g.get_neighbors(1)
assert 1 in g.get_neighbors(2)
assert g.edge_exists(0, 1) == 1
assert g.edge_exists(1, 2) == 1
assert g.edge_exists(0, 2) == 0
assert len(g.get_parents(0)) == 0
assert len(g.get_parents(1)) == 1
assert len(g.get_parents(2)) == 1
assert 0 in g.get_parents(1)
assert 1 in g.get_parents(2)
assert len(g.get_children(0)) == 1
assert len(g.get_children(1)) == 1
assert len(g.get_children(2)) == 0
assert 1 in g.get_children(0)
assert 2 in g.get_children(1)
| from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.node import Node
from pybbn.graph.dag import Dag
from nose import with_setup
def setup():
pass
def teardown():
pass
@with_setup(setup, teardown)
def test_graph_creation():
n0 = Node(0)
n1 = Node(1)
n2 = Node(2)
e0 = Edge(n0, n1, EdgeType.DIRECTED)
e1 = Edge(n1, n2, EdgeType.DIRECTED)
e2 = Edge(n2, n0, EdgeType.DIRECTED)
g = Dag()
g.add_node(n0)
g.add_node(n1)
g.add_edge(e0)
g.add_edge(e1)
g.add_edge(e2)
print(g)
assert len(g.get_nodes()) == 3
assert len(g.get_edges()) == 2
assert len(list(g.get_neighbors(0))) == 1
assert len(list(g.get_neighbors(1))) == 2
assert len(list(g.get_neighbors(2))) == 1
assert 1 in g.get_neighbors(0)
assert 0 in g.get_neighbors(1)
assert 2 in g.get_neighbors(1)
assert 1 in g.get_neighbors(2)
assert g.edge_exists(0, 1) == 1
assert g.edge_exists(1, 2) == 1
assert g.edge_exists(0, 2) == 0
assert len(g.get_parents(0)) == 0
assert len(g.get_parents(1)) == 1
assert len(g.get_parents(2)) == 1
assert 0 in g.get_parents(1)
assert 1 in g.get_parents(2)
assert len(g.get_children(0)) == 1
assert len(g.get_children(1)) == 1
assert len(g.get_children(2)) == 0
assert 1 in g.get_children(0)
assert 2 in g.get_children(1)
| Python | 0.000021 |
3ad6fc495c877d36bb014062094f6e52edf1ac23 | allow bps or Bps | i3pystatus/net_speed.py | i3pystatus/net_speed.py | from i3pystatus import IntervalModule
import speedtest_cli
import requests
import time
import os
from urllib.parse import urlparse
import contextlib
import sys
from io import StringIO
class NetSpeed(IntervalModule):
"""
Attempts to provide an estimation of internet speeds.
Requires: speedtest_cli
"""
settings = (
("url", "Target URL to download a file from. Uses speedtest_cli to "
"find the 'best' server if none is supplied."),
("units", "Valid values are B, b, bytes, or bits"),
"format"
)
color = "#FFFFFF"
interval = 300
url = None
units = 'bits'
format = "{speed} ({hosting_provider})"
def run(self):
# since speedtest_cli likes to print crap, we need to squelch it
@contextlib.contextmanager
def nostdout():
save_stdout = sys.stdout
sys.stdout = StringIO()
yield
sys.stdout = save_stdout
if not self.url:
with nostdout():
try:
config = speedtest_cli.getConfig()
servers = speedtest_cli.closestServers(config['client'])
best = speedtest_cli.getBestServer(servers)
# 1500x1500 is about 4.3MB, which seems like a reasonable place to
# start, i guess...
url = '%s/random1500x1500.jpg' % os.path.dirname(best['url'])
except KeyError:
url = None
        else:
            # honor an explicitly configured target URL
            url = self.url
        if not url:
cdict = {
"speed": 0,
"hosting_provider": 'null',
}
else:
with open('/dev/null', 'wb') as devnull:
start = time.time()
req = requests.get(url, stream=True)
devnull.write(req.content)
end = time.time()
total_length = int(req.headers.get('content-length'))
devnull.close()
                # note: dl_time is the raw wall-clock duration in seconds;
                # the rate itself is formatted later with "%.2f"
dl_time = float(end - start)
if self.units == 'bits' or self.units == 'b':
unit = 'bps'
kilo = 1000
mega = 1000000
giga = 1000000000
factor = 8
elif self.units == 'bytes' or self.units == 'B':
unit = 'Bps'
kilo = 8000
mega = 8000000
giga = 8000000000
factor = 1
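            # total_length is a raw byte count: kilo/mega/giga bucket that
            # count into a prefix, while `factor` scales the displayed value
            # into the requested unit (x8 when reporting bits).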
if total_length < kilo:
bps = float(total_length / dl_time)
if total_length >= kilo and total_length < mega:
unit = "K" + unit
bps = float((total_length / 1024.0) / dl_time)
if total_length >= mega and total_length < giga:
unit = "M" + unit
bps = float((total_length / (1024.0 * 1024.0)) / dl_time)
if total_length >= giga:
unit = "G" + unit
bps = float((total_length / (1024.0 * 1024.0 * 1024.0)) / dl_time)
bps = "%.2f" % (bps * factor)
speed = "%s %s" % (bps, unit)
hosting_provider = '.'.join(urlparse(url).hostname.split('.')[-2:])
cdict = {
"speed": speed,
"hosting_provider": hosting_provider,
}
self.output = {
"full_text": self.format.format(**cdict),
"color": self.color
}
| from i3pystatus import IntervalModule
import speedtest_cli
import requests
import time
import os
from urllib.parse import urlparse
class NetSpeed(IntervalModule):
"""
Attempts to provide an estimation of internet speeds.
Requires: speedtest_cli
"""
settings = (
("url", "Target URL to download a file from. Uses speedtest_cli to "
"find the 'best' server if none is supplied."),
"format"
)
color = "#FFFFFF"
interval = 300
url = None
format = "{speed} ({hosting_provider})"
def run(self):
if not self.url:
config = speedtest_cli.getConfig()
servers = speedtest_cli.closestServers(config['client'])
best = speedtest_cli.getBestServer(servers)
# 1500x1500 is about 4.3MB, which seems like a reasonable place to
# start, i guess...
url = '%s/random1500x1500.jpg' % os.path.dirname(best['url'])
        else:
            # honor an explicitly configured target URL
            url = self.url
        with open('/dev/null', 'wb') as devnull:
start = time.time()
req = requests.get(url, stream=True)
devnull.write(req.content)
end = time.time()
total_length = int(req.headers.get('content-length'))
devnull.close()
            # note: dl_time is the raw wall-clock duration in seconds;
            # the rate itself is formatted later with "%.2f"
dl_time = float(end - start)
if total_length < 999:
unit = "Bps"
bps = total_length / dl_time
if total_length >= 1000 < 999999:
unit = "KBps"
bps = (total_length / 1024.0) / dl_time
if total_length >= 1000000 < 999999999:
unit = "MBps"
bps = (total_length / (1024.0 * 1024.0)) / dl_time
if total_length >= 10000000:
unit = "GBps"
bps = (total_length / (1024.0 * 1024.0 * 1024.0)) / dl_time
bps = "%.2f" % bps
speed = "%s %s" % (bps, unit)
hosting_provider = '.'.join(urlparse(url).hostname.split('.')[-2:])
cdict = {
"speed": speed,
"hosting_provider": hosting_provider,
}
self.output = {
"full_text": self.format.format(**cdict),
"color": self.color
}
| Python | 0 |
0230de965fbc4247bf1f087f8454d09f30a6b0f3 | Fix naming convention of array references | angr/engines/soot/expressions/newArray.py | angr/engines/soot/expressions/newArray.py |
from .base import SimSootExpr
from ..values import SimSootValue_ArrayRef
import logging
l = logging.getLogger('angr.engines.soot.expressions.newarray')
class SimSootExpr_NewArray(SimSootExpr):
def __init__(self, expr, state):
super(SimSootExpr_NewArray, self).__init__(expr, state)
def _execute(self):
base_type = self.expr.base_type
size = self._translate_expr(self.expr.size).expr
self.expr = self.new_array(self.state, base_type, size)
@staticmethod
def new_array(state, base_type, size):
"""
        Allocates a new array in memory and returns a reference to its base element.
"""
size_bounded = SimSootExpr_NewArray._bound_array_size(state, size)
# arrays are stored on the heap
# => create a unique reference
javavm_memory = state.get_javavm_view_of_plugin('memory')
heap_alloc_id = "{uuid}.{base_type}_array".format(base_type=base_type,
uuid=javavm_memory.get_new_uuid())
        # return the reference to the base element
        # => the elements themselves get lazily initialized in the javavm memory
return SimSootValue_ArrayRef(heap_alloc_id, 0, base_type, size_bounded)
@staticmethod
def _bound_array_size(state, array_size):
# check if array size can exceed MAX_ARRAY_SIZE
javavm_memory = state.get_javavm_view_of_plugin('memory')
max_array_size = state.solver.BVV(javavm_memory.max_array_size, 32)
size_exceeds_maximum = state.solver.eval_upto(
max_array_size.SGE(array_size), 2
)
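        # despite its name, each entry answers "max_array_size >= array_size";
        # a lone False therefore means the size always exceeds the maximum.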
# overwrite size, if it *always* exceeds the maximum
        if True not in size_exceeds_maximum:
            l.warning("Array size %s always exceeds maximum. "
"It gets overwritten with the maximum %s."
% (array_size, max_array_size))
return max_array_size
        # bound the size, if it *can* exceed the maximum
if True in size_exceeds_maximum and\
False in size_exceeds_maximum:
l.warning("Array size %s can execeed maximum. "
"It gets bounded with the maximum %s."
% (array_size, max_array_size))
state.solver.add(max_array_size.SGE(array_size))
return array_size |
from .base import SimSootExpr
from ..values import SimSootValue_ArrayRef
import logging
l = logging.getLogger('angr.engines.soot.expressions.newarray')
class SimSootExpr_NewArray(SimSootExpr):
def __init__(self, expr, state):
super(SimSootExpr_NewArray, self).__init__(expr, state)
def _execute(self):
base_type = self.expr.base_type
size = self._translate_expr(self.expr.size).expr
self.expr = self.new_array(self.state, base_type, size)
@staticmethod
def new_array(state, base_type, size):
"""
        Allocates a new array in memory and returns a reference to its base element.
"""
size_bounded = SimSootExpr_NewArray._bound_array_size(state, size)
# arrays are stored on the heap
# => create a unique reference
javavm_memory = state.get_javavm_view_of_plugin('memory')
heap_alloc_id = "{type}_array_{uuid}".format(type=base_type,
uuid=javavm_memory.get_new_uuid())
        # return the reference to the base element
        # => the elements themselves get lazily initialized in the javavm memory
return SimSootValue_ArrayRef(heap_alloc_id, 0, base_type, size_bounded)
@staticmethod
def _bound_array_size(state, array_size):
# check if array size can exceed MAX_ARRAY_SIZE
javavm_memory = state.get_javavm_view_of_plugin('memory')
max_array_size = state.solver.BVV(javavm_memory.max_array_size, 32)
size_exceeds_maximum = state.solver.eval_upto(
max_array_size.SGE(array_size), 2
)
# overwrite size, if it *always* exceeds the maximum
        if True not in size_exceeds_maximum:
            l.warning("Array size %s always exceeds maximum. "
"It gets overwritten with the maximum %s."
% (array_size, max_array_size))
return max_array_size
        # bound the size, if it *can* exceed the maximum
if True in size_exceeds_maximum and\
False in size_exceeds_maximum:
l.warning("Array size %s can execeed maximum. "
"It gets bounded with the maximum %s."
% (array_size, max_array_size))
state.solver.add(max_array_size.SGE(array_size))
return array_size | Python | 0.000289 |